      1 //===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file defines a DAG pattern matching instruction selector for X86,
      11 // converting from a legalized DAG to an X86 DAG.
     12 //
     13 //===----------------------------------------------------------------------===//
     14 
     15 #define DEBUG_TYPE "x86-isel"
     16 #include "X86.h"
     17 #include "X86InstrBuilder.h"
     18 #include "X86MachineFunctionInfo.h"
     19 #include "X86RegisterInfo.h"
     20 #include "X86Subtarget.h"
     21 #include "X86TargetMachine.h"
     22 #include "llvm/Instructions.h"
     23 #include "llvm/Intrinsics.h"
     24 #include "llvm/Type.h"
     25 #include "llvm/CodeGen/FunctionLoweringInfo.h"
     26 #include "llvm/CodeGen/MachineConstantPool.h"
     27 #include "llvm/CodeGen/MachineFunction.h"
     28 #include "llvm/CodeGen/MachineFrameInfo.h"
     29 #include "llvm/CodeGen/MachineInstrBuilder.h"
     30 #include "llvm/CodeGen/MachineRegisterInfo.h"
     31 #include "llvm/CodeGen/SelectionDAGISel.h"
     32 #include "llvm/Target/TargetMachine.h"
     33 #include "llvm/Target/TargetOptions.h"
     34 #include "llvm/Support/CFG.h"
     35 #include "llvm/Support/Debug.h"
     36 #include "llvm/Support/ErrorHandling.h"
     37 #include "llvm/Support/MathExtras.h"
     38 #include "llvm/Support/raw_ostream.h"
     39 #include "llvm/ADT/Statistic.h"
     40 using namespace llvm;
     41 
     42 STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
     43 
     44 //===----------------------------------------------------------------------===//
     45 //                      Pattern Matcher Implementation
     46 //===----------------------------------------------------------------------===//
     47 
     48 namespace {
     49   /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
      50   /// SDValues instead of register numbers for the leaves of the matched
     51   /// tree.
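           /// For illustration: the matched form corresponds to the x86 memory operand
           /// Segment:[Base_Reg + Scale*IndexReg + Disp]; e.g. "movl 8(%ebx,%ecx,4), %eax"
           /// has Base_Reg = %ebx, IndexReg = %ecx, Scale = 4 and Disp = 8.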
     52   struct X86ISelAddressMode {
     53     enum {
     54       RegBase,
     55       FrameIndexBase
     56     } BaseType;
     57 
     58     // This is really a union, discriminated by BaseType!
     59     SDValue Base_Reg;
     60     int Base_FrameIndex;
     61 
     62     unsigned Scale;
     63     SDValue IndexReg;
     64     int32_t Disp;
     65     SDValue Segment;
     66     const GlobalValue *GV;
     67     const Constant *CP;
     68     const BlockAddress *BlockAddr;
     69     const char *ES;
     70     int JT;
     71     unsigned Align;    // CP alignment.
     72     unsigned char SymbolFlags;  // X86II::MO_*
     73 
     74     X86ISelAddressMode()
     75       : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
     76         Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
     77         SymbolFlags(X86II::MO_NO_FLAG) {
     78     }
     79 
     80     bool hasSymbolicDisplacement() const {
     81       return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
     82     }
     83 
     84     bool hasBaseOrIndexReg() const {
     85       return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
     86     }
     87 
     88     /// isRIPRelative - Return true if this addressing mode is already RIP
     89     /// relative.
     90     bool isRIPRelative() const {
     91       if (BaseType != RegBase) return false;
     92       if (RegisterSDNode *RegNode =
     93             dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
     94         return RegNode->getReg() == X86::RIP;
     95       return false;
     96     }
     97 
     98     void setBaseReg(SDValue Reg) {
     99       BaseType = RegBase;
    100       Base_Reg = Reg;
    101     }
    102 
    103     void dump() {
    104       dbgs() << "X86ISelAddressMode " << this << '\n';
    105       dbgs() << "Base_Reg ";
    106       if (Base_Reg.getNode() != 0)
    107         Base_Reg.getNode()->dump();
    108       else
    109         dbgs() << "nul";
    110       dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
    111              << " Scale" << Scale << '\n'
    112              << "IndexReg ";
    113       if (IndexReg.getNode() != 0)
    114         IndexReg.getNode()->dump();
    115       else
    116         dbgs() << "nul";
    117       dbgs() << " Disp " << Disp << '\n'
    118              << "GV ";
    119       if (GV)
    120         GV->dump();
    121       else
    122         dbgs() << "nul";
    123       dbgs() << " CP ";
    124       if (CP)
    125         CP->dump();
    126       else
    127         dbgs() << "nul";
    128       dbgs() << '\n'
    129              << "ES ";
    130       if (ES)
    131         dbgs() << ES;
    132       else
    133         dbgs() << "nul";
    134       dbgs() << " JT" << JT << " Align" << Align << '\n';
    135     }
    136   };
    137 }
    138 
    139 namespace {
    140   //===--------------------------------------------------------------------===//
    141   /// ISel - X86 specific code to select X86 machine instructions for
    142   /// SelectionDAG operations.
    143   ///
    144   class X86DAGToDAGISel : public SelectionDAGISel {
    145     /// X86Lowering - This object fully describes how to lower LLVM code to an
    146     /// X86-specific SelectionDAG.
    147     const X86TargetLowering &X86Lowering;
    148 
    149     /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    150     /// make the right decision when generating code for different targets.
    151     const X86Subtarget *Subtarget;
    152 
    153     /// OptForSize - If true, selector should try to optimize for code size
     154     /// OptForSize - If true, the selector should try to optimize for code size
    155     bool OptForSize;
    156 
    157   public:
    158     explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
    159       : SelectionDAGISel(tm, OptLevel),
    160         X86Lowering(*tm.getTargetLowering()),
    161         Subtarget(&tm.getSubtarget<X86Subtarget>()),
    162         OptForSize(false) {}
    163 
    164     virtual const char *getPassName() const {
    165       return "X86 DAG->DAG Instruction Selection";
    166     }
    167 
    168     virtual void EmitFunctionEntryCode();
    169 
    170     virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;
    171 
    172     virtual void PreprocessISelDAG();
    173 
    174     inline bool immSext8(SDNode *N) const {
    175       return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    176     }
    177 
    178     // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
     179     // sign-extended field.
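             // For example, 0xFFFFFFFF80000000 (i.e. -2^31) fits, while
             // 0x0000000080000000 (i.e. +2^31) does not.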
    180     inline bool i64immSExt32(SDNode *N) const {
    181       uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
    182       return (int64_t)v == (int32_t)v;
    183     }
    184 
    185 // Include the pieces autogenerated from the target description.
    186 #include "X86GenDAGISel.inc"
    187 
    188   private:
    189     SDNode *Select(SDNode *N);
    190     SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
    191     SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);
    192     SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT);
    193 
    194     bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    195     bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    196     bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    197     bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    198     bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
    199                                  unsigned Depth);
    200     bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    201     bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
    202                     SDValue &Scale, SDValue &Index, SDValue &Disp,
    203                     SDValue &Segment);
    204     bool SelectLEAAddr(SDValue N, SDValue &Base,
    205                        SDValue &Scale, SDValue &Index, SDValue &Disp,
    206                        SDValue &Segment);
    207     bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
    208                            SDValue &Scale, SDValue &Index, SDValue &Disp,
    209                            SDValue &Segment);
    210     bool SelectScalarSSELoad(SDNode *Root, SDValue N,
    211                              SDValue &Base, SDValue &Scale,
    212                              SDValue &Index, SDValue &Disp,
    213                              SDValue &Segment,
    214                              SDValue &NodeWithChain);
    215 
    216     bool TryFoldLoad(SDNode *P, SDValue N,
    217                      SDValue &Base, SDValue &Scale,
    218                      SDValue &Index, SDValue &Disp,
    219                      SDValue &Segment);
    220 
    221     /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    222     /// inline asm expressions.
    223     virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
    224                                               char ConstraintCode,
    225                                               std::vector<SDValue> &OutOps);
    226 
    227     void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
    228 
    229     inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
    230                                    SDValue &Scale, SDValue &Index,
    231                                    SDValue &Disp, SDValue &Segment) {
    232       Base  = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
    233         CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) :
    234         AM.Base_Reg;
    235       Scale = getI8Imm(AM.Scale);
    236       Index = AM.IndexReg;
    237       // These are 32-bit even in 64-bit mode since RIP relative offset
    238       // is 32-bit.
    239       if (AM.GV)
    240         Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(),
    241                                               MVT::i32, AM.Disp,
    242                                               AM.SymbolFlags);
    243       else if (AM.CP)
    244         Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
    245                                              AM.Align, AM.Disp, AM.SymbolFlags);
    246       else if (AM.ES)
    247         Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
    248       else if (AM.JT != -1)
    249         Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
    250       else if (AM.BlockAddr)
    251         Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32,
    252                                        true, AM.SymbolFlags);
    253       else
    254         Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);
    255 
    256       if (AM.Segment.getNode())
    257         Segment = AM.Segment;
    258       else
    259         Segment = CurDAG->getRegister(0, MVT::i32);
    260     }
    261 
    262     /// getI8Imm - Return a target constant with the specified value, of type
    263     /// i8.
    264     inline SDValue getI8Imm(unsigned Imm) {
    265       return CurDAG->getTargetConstant(Imm, MVT::i8);
    266     }
    267 
    268     /// getI32Imm - Return a target constant with the specified value, of type
    269     /// i32.
    270     inline SDValue getI32Imm(unsigned Imm) {
    271       return CurDAG->getTargetConstant(Imm, MVT::i32);
    272     }
    273 
    274     /// getGlobalBaseReg - Return an SDNode that returns the value of
    275     /// the global base register. Output instructions required to
    276     /// initialize the global base register, if necessary.
    277     ///
    278     SDNode *getGlobalBaseReg();
    279 
    280     /// getTargetMachine - Return a reference to the TargetMachine, casted
    281     /// to the target-specific type.
    282     const X86TargetMachine &getTargetMachine() {
    283       return static_cast<const X86TargetMachine &>(TM);
    284     }
    285 
    286     /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    287     /// to the target-specific type.
    288     const X86InstrInfo *getInstrInfo() {
    289       return getTargetMachine().getInstrInfo();
    290     }
    291   };
    292 }
    293 
    294 
    295 bool
    296 X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
    297   if (OptLevel == CodeGenOpt::None) return false;
    298 
    299   if (!N.hasOneUse())
    300     return false;
    301 
    302   if (N.getOpcode() != ISD::LOAD)
    303     return true;
    304 
    305   // If N is a load, do additional profitability checks.
    306   if (U == Root) {
    307     switch (U->getOpcode()) {
    308     default: break;
    309     case X86ISD::ADD:
    310     case X86ISD::SUB:
    311     case X86ISD::AND:
    312     case X86ISD::XOR:
    313     case X86ISD::OR:
    314     case ISD::ADD:
    315     case ISD::ADDC:
    316     case ISD::ADDE:
    317     case ISD::AND:
    318     case ISD::OR:
    319     case ISD::XOR: {
    320       SDValue Op1 = U->getOperand(1);
    321 
     322       // If the other operand is an 8-bit immediate we should fold the immediate
    323       // instead. This reduces code size.
    324       // e.g.
    325       // movl 4(%esp), %eax
    326       // addl $4, %eax
    327       // vs.
    328       // movl $4, %eax
    329       // addl 4(%esp), %eax
     330       // The former is 2 bytes shorter. In the case where the increment is 1,
     331       // the saving can be 4 bytes (by using incl %eax).
    332       if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
    333         if (Imm->getAPIntValue().isSignedIntN(8))
    334           return false;
    335 
    336       // If the other operand is a TLS address, we should fold it instead.
    337       // This produces
    338       // movl    %gs:0, %eax
    339       // leal    i@NTPOFF(%eax), %eax
    340       // instead of
    341       // movl    $i@NTPOFF, %eax
    342       // addl    %gs:0, %eax
     343       // If the block also has an access to a second TLS address, this will save
     344       // a load.
     345       // FIXME: This is probably also true for non-TLS addresses.
    346       if (Op1.getOpcode() == X86ISD::Wrapper) {
    347         SDValue Val = Op1.getOperand(0);
    348         if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
    349           return false;
    350       }
    351     }
    352     }
    353   }
    354 
    355   return true;
    356 }
    357 
     358 /// MoveBelowOrigChain - Replace the original chain operand of the call with
     359 /// the load's chain operand and move the load below the call's chain operand.
    360 static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
    361                                   SDValue Call, SDValue OrigChain) {
    362   SmallVector<SDValue, 8> Ops;
    363   SDValue Chain = OrigChain.getOperand(0);
    364   if (Chain.getNode() == Load.getNode())
    365     Ops.push_back(Load.getOperand(0));
    366   else {
    367     assert(Chain.getOpcode() == ISD::TokenFactor &&
    368            "Unexpected chain operand");
    369     for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
    370       if (Chain.getOperand(i).getNode() == Load.getNode())
    371         Ops.push_back(Load.getOperand(0));
    372       else
    373         Ops.push_back(Chain.getOperand(i));
    374     SDValue NewChain =
    375       CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
    376                       MVT::Other, &Ops[0], Ops.size());
    377     Ops.clear();
    378     Ops.push_back(NewChain);
    379   }
    380   for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    381     Ops.push_back(OrigChain.getOperand(i));
    382   CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
    383   CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
    384                              Load.getOperand(1), Load.getOperand(2));
    385   Ops.clear();
    386   Ops.push_back(SDValue(Load.getNode(), 1));
    387   for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    388     Ops.push_back(Call.getOperand(i));
    389   CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size());
    390 }
    391 
     392 /// isCalleeLoad - Return true if the call address is a load and it can be
    393 /// moved below CALLSEQ_START and the chains leading up to the call.
    394 /// Return the CALLSEQ_START by reference as a second output.
    395 /// In the case of a tail call, there isn't a callseq node between the call
    396 /// chain and the load.
    397 static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
    398   if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    399     return false;
    400   LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
    401   if (!LD ||
    402       LD->isVolatile() ||
    403       LD->getAddressingMode() != ISD::UNINDEXED ||
    404       LD->getExtensionType() != ISD::NON_EXTLOAD)
    405     return false;
    406 
    407   // Now let's find the callseq_start.
    408   while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    409     if (!Chain.hasOneUse())
    410       return false;
    411     Chain = Chain.getOperand(0);
    412   }
    413 
    414   if (!Chain.getNumOperands())
    415     return false;
    416   if (Chain.getOperand(0).getNode() == Callee.getNode())
    417     return true;
    418   if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
    419       Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
    420       Callee.getValue(1).hasOneUse())
    421     return true;
    422   return false;
    423 }
    424 
    425 void X86DAGToDAGISel::PreprocessISelDAG() {
    426   // OptForSize is used in pattern predicates that isel is matching.
    427   OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);
    428 
    429   for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
    430        E = CurDAG->allnodes_end(); I != E; ) {
    431     SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.
    432 
    433     if (OptLevel != CodeGenOpt::None &&
    434         (N->getOpcode() == X86ISD::CALL ||
    435          N->getOpcode() == X86ISD::TC_RETURN)) {
     436       /// Try moving the call address load from outside the callseq_start to
     437       /// just before the call to allow it to be folded.
    438       ///
    439       ///     [Load chain]
    440       ///         ^
    441       ///         |
    442       ///       [Load]
    443       ///       ^    ^
    444       ///       |    |
    445       ///      /      \--
    446       ///     /          |
    447       ///[CALLSEQ_START] |
    448       ///     ^          |
    449       ///     |          |
    450       /// [LOAD/C2Reg]   |
    451       ///     |          |
    452       ///      \        /
    453       ///       \      /
    454       ///       [CALL]
    455       bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
    456       SDValue Chain = N->getOperand(0);
    457       SDValue Load  = N->getOperand(1);
    458       if (!isCalleeLoad(Load, Chain, HasCallSeq))
    459         continue;
    460       MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
    461       ++NumLoadMoved;
    462       continue;
    463     }
    464 
     465     // Lower fpround and fpextend nodes that target the FP stack to be a store
     466     // and a load through the stack.  This is a gross hack.  We would like to
     467     // simply mark these as being illegal, but when we do that, legalize produces
     468     // them when it expands calls, then expands them in the same legalize pass.
     469     // We would like dag combine to be able to hack on these between the call
     470     // expansion and the node legalization.  As such, this pass basically does
     471     // "really late" legalization of these inline with the X86 isel pass.
    472     // FIXME: This should only happen when not compiled with -O0.
    473     if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
    474       continue;
    475 
    476     EVT SrcVT = N->getOperand(0).getValueType();
    477     EVT DstVT = N->getValueType(0);
    478 
    479     // If any of the sources are vectors, no fp stack involved.
    480     if (SrcVT.isVector() || DstVT.isVector())
    481       continue;
    482 
    483     // If the source and destination are SSE registers, then this is a legal
    484     // conversion that should not be lowered.
    485     bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    486     bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    487     if (SrcIsSSE && DstIsSSE)
    488       continue;
    489 
    490     if (!SrcIsSSE && !DstIsSSE) {
    491       // If this is an FPStack extension, it is a noop.
    492       if (N->getOpcode() == ISD::FP_EXTEND)
    493         continue;
    494       // If this is a value-preserving FPStack truncation, it is a noop.
    495       if (N->getConstantOperandVal(1))
    496         continue;
    497     }
    498 
    499     // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    500     // FPStack has extload and truncstore.  SSE can fold direct loads into other
    501     // operations.  Based on this, decide what we want to do.
    502     EVT MemVT;
    503     if (N->getOpcode() == ISD::FP_ROUND)
    504       MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    505     else
    506       MemVT = SrcIsSSE ? SrcVT : DstVT;
    507 
    508     SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    509     DebugLoc dl = N->getDebugLoc();
    510 
    511     // FIXME: optimize the case where the src/dest is a load or store?
    512     SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
    513                                           N->getOperand(0),
    514                                           MemTmp, MachinePointerInfo(), MemVT,
    515                                           false, false, 0);
    516     SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
    517                                         MachinePointerInfo(),
    518                                         MemVT, false, false, 0);
    519 
    520     // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
     521     // extload we created.  This will cause general havoc on the dag because
    522     // anything below the conversion could be folded into other existing nodes.
    523     // To avoid invalidating 'I', back it up to the convert node.
    524     --I;
    525     CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
    526 
    527     // Now that we did that, the node is dead.  Increment the iterator to the
    528     // next node to process, then delete N.
    529     ++I;
    530     CurDAG->DeleteNode(N);
    531   }
    532 }
    533 
    534 
    535 /// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
    536 /// the main function.
    537 void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
    538                                              MachineFrameInfo *MFI) {
    539   const TargetInstrInfo *TII = TM.getInstrInfo();
    540   if (Subtarget->isTargetCygMing()) {
    541     unsigned CallOp =
    542       Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    543     BuildMI(BB, DebugLoc(),
    544             TII->get(CallOp)).addExternalSymbol("__main");
    545   }
    546 }
    547 
    548 void X86DAGToDAGISel::EmitFunctionEntryCode() {
    549   // If this is main, emit special code for main.
    550   if (const Function *Fn = MF->getFunction())
    551     if (Fn->hasExternalLinkage() && Fn->getName() == "main")
    552       EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
    553 }
    554 
    555 static bool isDispSafeForFrameIndex(int64_t Val) {
    556   // On 64-bit platforms, we can run into an issue where a frame index
    557   // includes a displacement that, when added to the explicit displacement,
    558   // will overflow the displacement field. Assuming that the frame index
     559   // displacement fits into a 31-bit integer (which is only slightly more
    560   // aggressive than the current fundamental assumption that it fits into
    561   // a 32-bit integer), a 31-bit disp should always be safe.
    562   return isInt<31>(Val);
    563 }
    564 
    565 bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
    566                                             X86ISelAddressMode &AM) {
    567   int64_t Val = AM.Disp + Offset;
    568   CodeModel::Model M = TM.getCodeModel();
    569   if (Subtarget->is64Bit()) {
    570     if (!X86::isOffsetSuitableForCodeModel(Val, M,
    571                                            AM.hasSymbolicDisplacement()))
    572       return true;
    573     // In addition to the checks required for a register base, check that
    574     // we do not try to use an unsafe Disp with a frame index.
    575     if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
    576         !isDispSafeForFrameIndex(Val))
    577       return true;
    578   }
    579   AM.Disp = Val;
    580   return false;
    581 
    582 }
    583 
    584 bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
    585   SDValue Address = N->getOperand(1);
    586 
    587   // load gs:0 -> GS segment register.
    588   // load fs:0 -> FS segment register.
    589   //
    590   // This optimization is valid because the GNU TLS model defines that
    591   // gs:0 (or fs:0 on X86-64) contains its own address.
    592   // For more information see http://people.redhat.com/drepper/tls.pdf
    593   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    594     if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
    595         Subtarget->isTargetELF())
    596       switch (N->getPointerInfo().getAddrSpace()) {
    597       case 256:
    598         AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    599         return false;
    600       case 257:
    601         AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
    602         return false;
    603       }
    604 
    605   return true;
    606 }
    607 
    608 /// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
    609 /// into an addressing mode.  These wrap things that will resolve down into a
    610 /// symbol reference.  If no match is possible, this returns true, otherwise it
    611 /// returns false.
    612 bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
    613   // If the addressing mode already has a symbol as the displacement, we can
    614   // never match another symbol.
    615   if (AM.hasSymbolicDisplacement())
    616     return true;
    617 
    618   SDValue N0 = N.getOperand(0);
    619   CodeModel::Model M = TM.getCodeModel();
    620 
    621   // Handle X86-64 rip-relative addresses.  We check this before checking direct
    622   // folding because RIP is preferable to non-RIP accesses.
    623   if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
    624       // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
    625       // they cannot be folded into immediate fields.
    626       // FIXME: This can be improved for kernel and other models?
    627       (M == CodeModel::Small || M == CodeModel::Kernel)) {
    628     // Base and index reg must be 0 in order to use %rip as base.
    629     if (AM.hasBaseOrIndexReg())
    630       return true;
    631     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
    632       X86ISelAddressMode Backup = AM;
    633       AM.GV = G->getGlobal();
    634       AM.SymbolFlags = G->getTargetFlags();
    635       if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
    636         AM = Backup;
    637         return true;
    638       }
    639     } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
    640       X86ISelAddressMode Backup = AM;
    641       AM.CP = CP->getConstVal();
    642       AM.Align = CP->getAlignment();
    643       AM.SymbolFlags = CP->getTargetFlags();
    644       if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
    645         AM = Backup;
    646         return true;
    647       }
    648     } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
    649       AM.ES = S->getSymbol();
    650       AM.SymbolFlags = S->getTargetFlags();
    651     } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
    652       AM.JT = J->getIndex();
    653       AM.SymbolFlags = J->getTargetFlags();
    654     } else {
    655       AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
    656       AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    657     }
    658 
    659     if (N.getOpcode() == X86ISD::WrapperRIP)
    660       AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    661     return false;
    662   }
    663 
    664   // Handle the case when globals fit in our immediate field: This is true for
    665   // X86-32 always and X86-64 when in -mcmodel=small mode.  In 64-bit
    666   // mode, this only applies to a non-RIP-relative computation.
    667   if (!Subtarget->is64Bit() ||
    668       M == CodeModel::Small || M == CodeModel::Kernel) {
    669     assert(N.getOpcode() != X86ISD::WrapperRIP &&
    670            "RIP-relative addressing already handled");
    671     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
    672       AM.GV = G->getGlobal();
    673       AM.Disp += G->getOffset();
    674       AM.SymbolFlags = G->getTargetFlags();
    675     } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
    676       AM.CP = CP->getConstVal();
    677       AM.Align = CP->getAlignment();
    678       AM.Disp += CP->getOffset();
    679       AM.SymbolFlags = CP->getTargetFlags();
    680     } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
    681       AM.ES = S->getSymbol();
    682       AM.SymbolFlags = S->getTargetFlags();
    683     } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
    684       AM.JT = J->getIndex();
    685       AM.SymbolFlags = J->getTargetFlags();
    686     } else {
    687       AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
    688       AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    689     }
    690     return false;
    691   }
    692 
    693   return true;
    694 }
    695 
    696 /// MatchAddress - Add the specified node to the specified addressing mode,
    697 /// returning true if it cannot be done.  This just pattern matches for the
    698 /// addressing mode.
    699 bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
    700   if (MatchAddressRecursively(N, AM, 0))
    701     return true;
    702 
    703   // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
    704   // a smaller encoding and avoids a scaled-index.
    705   if (AM.Scale == 2 &&
    706       AM.BaseType == X86ISelAddressMode::RegBase &&
    707       AM.Base_Reg.getNode() == 0) {
    708     AM.Base_Reg = AM.IndexReg;
    709     AM.Scale = 1;
    710   }
    711 
    712   // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
    713   // because it has a smaller encoding.
    714   // TODO: Which other code models can use this?
    715   if (TM.getCodeModel() == CodeModel::Small &&
    716       Subtarget->is64Bit() &&
    717       AM.Scale == 1 &&
    718       AM.BaseType == X86ISelAddressMode::RegBase &&
    719       AM.Base_Reg.getNode() == 0 &&
    720       AM.IndexReg.getNode() == 0 &&
    721       AM.SymbolFlags == X86II::MO_NO_FLAG &&
    722       AM.hasSymbolicDisplacement())
    723     AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
    724 
    725   return false;
    726 }
    727 
    728 // Insert a node into the DAG at least before the Pos node's position. This
    729 // will reposition the node as needed, and will assign it a node ID that is <=
    730 // the Pos node's ID. Note that this does *not* preserve the uniqueness of node
    731 // IDs! The selection DAG must no longer depend on their uniqueness when this
    732 // is used.
    733 static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
    734   if (N.getNode()->getNodeId() == -1 ||
    735       N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    736     DAG.RepositionNode(Pos.getNode(), N.getNode());
    737     N.getNode()->setNodeId(Pos.getNode()->getNodeId());
    738   }
    739 }
    740 
     741 // Transform "(X >> (8-C1)) & C2" to "((X >> 8) & 0xff) << C1" if safe. This
     742 // allows us to convert the shift and AND into an h-register extract and
     743 // a scaled index. Returns false if the simplification is performed.
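         // For example (illustrative): with C1 == 2, the pattern "(x >> 6) & 0x3fc"
         // becomes index "(x >> 8) & 0xff" with scale 4, i.e. an extract of the
         // second byte (an h-register when x lives in EAX/EBX/ECX/EDX) scaled by 4.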
    744 static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
    745                                       uint64_t Mask,
    746                                       SDValue Shift, SDValue X,
    747                                       X86ISelAddressMode &AM) {
    748   if (Shift.getOpcode() != ISD::SRL ||
    749       !isa<ConstantSDNode>(Shift.getOperand(1)) ||
    750       !Shift.hasOneUse())
    751     return true;
    752 
    753   int ScaleLog = 8 - Shift.getConstantOperandVal(1);
    754   if (ScaleLog <= 0 || ScaleLog >= 4 ||
    755       Mask != (0xffu << ScaleLog))
    756     return true;
    757 
    758   EVT VT = N.getValueType();
    759   DebugLoc DL = N.getDebugLoc();
    760   SDValue Eight = DAG.getConstant(8, MVT::i8);
    761   SDValue NewMask = DAG.getConstant(0xff, VT);
    762   SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
    763   SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
    764   SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
    765   SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);
    766 
    767   // Insert the new nodes into the topological ordering. We must do this in
    768   // a valid topological ordering as nothing is going to go back and re-sort
    769   // these nodes. We continually insert before 'N' in sequence as this is
    770   // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
    771   // hierarchy left to express.
    772   InsertDAGNode(DAG, N, Eight);
    773   InsertDAGNode(DAG, N, Srl);
    774   InsertDAGNode(DAG, N, NewMask);
    775   InsertDAGNode(DAG, N, And);
    776   InsertDAGNode(DAG, N, ShlCount);
    777   InsertDAGNode(DAG, N, Shl);
    778   DAG.ReplaceAllUsesWith(N, Shl);
    779   AM.IndexReg = And;
    780   AM.Scale = (1 << ScaleLog);
    781   return false;
    782 }
    783 
    784 // Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
    785 // allows us to fold the shift into this addressing mode. Returns false if the
    786 // transform succeeded.
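         // For example (illustrative): "(x << 2) & 60" can be rewritten as
         // "(x & 15) << 2"; the shl then folds into the addressing mode as
         // scale 4 with index (x & 15).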
    787 static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
    788                                         uint64_t Mask,
    789                                         SDValue Shift, SDValue X,
    790                                         X86ISelAddressMode &AM) {
    791   if (Shift.getOpcode() != ISD::SHL ||
    792       !isa<ConstantSDNode>(Shift.getOperand(1)))
    793     return true;
    794 
    795   // Not likely to be profitable if either the AND or SHIFT node has more
    796   // than one use (unless all uses are for address computation). Besides,
    797   // isel mechanism requires their node ids to be reused.
    798   if (!N.hasOneUse() || !Shift.hasOneUse())
    799     return true;
    800 
    801   // Verify that the shift amount is something we can fold.
    802   unsigned ShiftAmt = Shift.getConstantOperandVal(1);
    803   if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    804     return true;
    805 
    806   EVT VT = N.getValueType();
    807   DebugLoc DL = N.getDebugLoc();
    808   SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
    809   SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
    810   SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
    811 
    812   // Insert the new nodes into the topological ordering. We must do this in
    813   // a valid topological ordering as nothing is going to go back and re-sort
    814   // these nodes. We continually insert before 'N' in sequence as this is
    815   // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
    816   // hierarchy left to express.
    817   InsertDAGNode(DAG, N, NewMask);
    818   InsertDAGNode(DAG, N, NewAnd);
    819   InsertDAGNode(DAG, N, NewShift);
    820   DAG.ReplaceAllUsesWith(N, NewShift);
    821 
    822   AM.Scale = 1 << ShiftAmt;
    823   AM.IndexReg = NewAnd;
    824   return false;
    825 }
    826 
    827 // Implement some heroics to detect shifts of masked values where the mask can
    828 // be replaced by extending the shift and undoing that in the addressing mode
    829 // scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
    830 // (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
    831 // the addressing mode. This results in code such as:
    832 //
    833 //   int f(short *y, int *lookup_table) {
    834 //     ...
    835 //     return *y + lookup_table[*y >> 11];
    836 //   }
    837 //
    838 // Turning into:
    839 //   movzwl (%rdi), %eax
    840 //   movl %eax, %ecx
    841 //   shrl $11, %ecx
    842 //   addl (%rsi,%rcx,4), %eax
    843 //
    844 // Instead of:
    845 //   movzwl (%rdi), %eax
    846 //   movl %eax, %ecx
    847 //   shrl $9, %ecx
    848 //   andl $124, %rcx
    849 //   addl (%rsi,%rcx), %eax
    850 //
    851 // Note that this function assumes the mask is provided as a mask *after* the
    852 // value is shifted. The input chain may or may not match that, but computing
    853 // such a mask is trivial.
    854 static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
    855                                     uint64_t Mask,
    856                                     SDValue Shift, SDValue X,
    857                                     X86ISelAddressMode &AM) {
    858   if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
    859       !isa<ConstantSDNode>(Shift.getOperand(1)))
    860     return true;
    861 
    862   unsigned ShiftAmt = Shift.getConstantOperandVal(1);
    863   unsigned MaskLZ = CountLeadingZeros_64(Mask);
    864   unsigned MaskTZ = CountTrailingZeros_64(Mask);
    865 
    866   // The amount of shift we're trying to fit into the addressing mode is taken
    867   // from the trailing zeros of the mask.
    868   unsigned AMShiftAmt = MaskTZ;
    869 
    870   // There is nothing we can do here unless the mask is removing some bits.
    871   // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
    872   if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;
    873 
     874   // We also need to ensure that the mask is a contiguous run of bits.
    875   if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
    876 
    877   // Scale the leading zero count down based on the actual size of the value.
    878   // Also scale it down based on the size of the shift.
    879   MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;
    880 
    881   // The final check is to ensure that any masked out high bits of X are
    882   // already known to be zero. Otherwise, the mask has a semantic impact
    883   // other than masking out a couple of low bits. Unfortunately, because of
    884   // the mask, zero extensions will be removed from operands in some cases.
    885   // This code works extra hard to look through extensions because we can
    886   // replace them with zero extensions cheaply if necessary.
    887   bool ReplacingAnyExtend = false;
    888   if (X.getOpcode() == ISD::ANY_EXTEND) {
    889     unsigned ExtendBits =
    890       X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
    891     // Assume that we'll replace the any-extend with a zero-extend, and
    892     // narrow the search to the extended value.
    893     X = X.getOperand(0);
    894     MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    895     ReplacingAnyExtend = true;
    896   }
    897   APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
    898                                                MaskLZ);
    899   APInt KnownZero, KnownOne;
    900   DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
    901   if (MaskedHighBits != KnownZero) return true;
    902 
    903   // We've identified a pattern that can be transformed into a single shift
    904   // and an addressing mode. Make it so.
    905   EVT VT = N.getValueType();
    906   if (ReplacingAnyExtend) {
    907     assert(X.getValueType() != VT);
    908     // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    909     SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X);
    910     InsertDAGNode(DAG, N, NewX);
    911     X = NewX;
    912   }
    913   DebugLoc DL = N.getDebugLoc();
    914   SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
    915   SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
    916   SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
    917   SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
    918 
    919   // Insert the new nodes into the topological ordering. We must do this in
    920   // a valid topological ordering as nothing is going to go back and re-sort
    921   // these nodes. We continually insert before 'N' in sequence as this is
    922   // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
    923   // hierarchy left to express.
    924   InsertDAGNode(DAG, N, NewSRLAmt);
    925   InsertDAGNode(DAG, N, NewSRL);
    926   InsertDAGNode(DAG, N, NewSHLAmt);
    927   InsertDAGNode(DAG, N, NewSHL);
    928   DAG.ReplaceAllUsesWith(N, NewSHL);
    929 
    930   AM.Scale = 1 << AMShiftAmt;
    931   AM.IndexReg = NewSRL;
    932   return false;
    933 }
    934 
    935 bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
    936                                               unsigned Depth) {
    937   DebugLoc dl = N.getDebugLoc();
    938   DEBUG({
    939       dbgs() << "MatchAddress: ";
    940       AM.dump();
    941     });
    942   // Limit recursion.
    943   if (Depth > 5)
    944     return MatchAddressBase(N, AM);
    945 
    946   // If this is already a %rip relative address, we can only merge immediates
    947   // into it.  Instead of handling this in every case, we handle it here.
    948   // RIP relative addressing: %rip + 32-bit displacement!
    949   if (AM.isRIPRelative()) {
     950     // FIXME: JumpTable and ExternalSymbol addresses currently don't like
    951     // displacements.  It isn't very important, but this should be fixed for
    952     // consistency.
    953     if (!AM.ES && AM.JT != -1) return true;
    954 
    955     if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
    956       if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
    957         return false;
    958     return true;
    959   }
    960 
    961   switch (N.getOpcode()) {
    962   default: break;
    963   case ISD::Constant: {
    964     uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    965     if (!FoldOffsetIntoAddress(Val, AM))
    966       return false;
    967     break;
    968   }
    969 
    970   case X86ISD::Wrapper:
    971   case X86ISD::WrapperRIP:
    972     if (!MatchWrapper(N, AM))
    973       return false;
    974     break;
    975 
    976   case ISD::LOAD:
    977     if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
    978       return false;
    979     break;
    980 
    981   case ISD::FrameIndex:
    982     if (AM.BaseType == X86ISelAddressMode::RegBase &&
    983         AM.Base_Reg.getNode() == 0 &&
    984         (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
    985       AM.BaseType = X86ISelAddressMode::FrameIndexBase;
    986       AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
    987       return false;
    988     }
    989     break;
    990 
    991   case ISD::SHL:
    992     if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
    993       break;
    994 
    995     if (ConstantSDNode
    996           *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
    997       unsigned Val = CN->getZExtValue();
    998       // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
    999       // that the base operand remains free for further matching. If
   1000       // the base doesn't end up getting used, a post-processing step
   1001       // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
   1002       if (Val == 1 || Val == 2 || Val == 3) {
   1003         AM.Scale = 1 << Val;
   1004         SDValue ShVal = N.getNode()->getOperand(0);
   1005 
   1006         // Okay, we know that we have a scale by now.  However, if the scaled
   1007         // value is an add of something and a constant, we can fold the
   1008         // constant into the disp field here.
   1009         if (CurDAG->isBaseWithConstantOffset(ShVal)) {
   1010           AM.IndexReg = ShVal.getNode()->getOperand(0);
   1011           ConstantSDNode *AddVal =
   1012             cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
   1013           uint64_t Disp = AddVal->getSExtValue() << Val;
   1014           if (!FoldOffsetIntoAddress(Disp, AM))
   1015             return false;
   1016         }
   1017 
   1018         AM.IndexReg = ShVal;
   1019         return false;
   1020       }
   1021     break;
   1022     }
   1023 
   1024   case ISD::SRL: {
   1025     // Scale must not be used already.
   1026     if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
   1027 
   1028     SDValue And = N.getOperand(0);
   1029     if (And.getOpcode() != ISD::AND) break;
   1030     SDValue X = And.getOperand(0);
   1031 
   1032     // We only handle up to 64-bit values here as those are what matter for
   1033     // addressing mode optimizations.
   1034     if (X.getValueSizeInBits() > 64) break;
   1035 
   1036     // The mask used for the transform is expected to be post-shift, but we
   1037     // found the shift first so just apply the shift to the mask before passing
   1038     // it down.
   1039     if (!isa<ConstantSDNode>(N.getOperand(1)) ||
   1040         !isa<ConstantSDNode>(And.getOperand(1)))
   1041       break;
   1042     uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
   1043 
   1044     // Try to fold the mask and shift into the scale, and return false if we
   1045     // succeed.
   1046     if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
   1047       return false;
   1048     break;
   1049   }
   1050 
   1051   case ISD::SMUL_LOHI:
   1052   case ISD::UMUL_LOHI:
   1053     // A mul_lohi where we need the low part can be folded as a plain multiply.
   1054     if (N.getResNo() != 0) break;
   1055     // FALL THROUGH
   1056   case ISD::MUL:
   1057   case X86ISD::MUL_IMM:
   1058     // X*[3,5,9] -> X+X*[2,4,8]
   1059     if (AM.BaseType == X86ISelAddressMode::RegBase &&
   1060         AM.Base_Reg.getNode() == 0 &&
   1061         AM.IndexReg.getNode() == 0) {
   1062       if (ConstantSDNode
   1063             *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
   1064         if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
   1065             CN->getZExtValue() == 9) {
   1066           AM.Scale = unsigned(CN->getZExtValue())-1;
   1067 
   1068           SDValue MulVal = N.getNode()->getOperand(0);
   1069           SDValue Reg;
   1070 
   1071           // Okay, we know that we have a scale by now.  However, if the scaled
   1072           // value is an add of something and a constant, we can fold the
   1073           // constant into the disp field here.
   1074           if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
   1075               isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
   1076             Reg = MulVal.getNode()->getOperand(0);
   1077             ConstantSDNode *AddVal =
   1078               cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
   1079             uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
   1080             if (FoldOffsetIntoAddress(Disp, AM))
   1081               Reg = N.getNode()->getOperand(0);
   1082           } else {
   1083             Reg = N.getNode()->getOperand(0);
   1084           }
   1085 
   1086           AM.IndexReg = AM.Base_Reg = Reg;
   1087           return false;
   1088         }
   1089     }
   1090     break;
   1091 
   1092   case ISD::SUB: {
    1093     // Given A-B, if A can be completely folded into the address, with the
    1094     // index field left unused, use -B as the index.
    1095     // This is a win if A has multiple parts that can be folded into
    1096     // the address. Also, this saves a mov if the base register has
    1097     // other uses, since it avoids a two-address sub instruction; however,
    1098     // it costs an additional mov if the index register has other uses.
   1099 
   1100     // Add an artificial use to this node so that we can keep track of
   1101     // it if it gets CSE'd with a different node.
   1102     HandleSDNode Handle(N);
   1103 
   1104     // Test if the LHS of the sub can be folded.
   1105     X86ISelAddressMode Backup = AM;
   1106     if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
   1107       AM = Backup;
   1108       break;
   1109     }
   1110     // Test if the index field is free for use.
   1111     if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
   1112       AM = Backup;
   1113       break;
   1114     }
   1115 
   1116     int Cost = 0;
   1117     SDValue RHS = Handle.getValue().getNode()->getOperand(1);
   1118     // If the RHS involves a register with multiple uses, this
   1119     // transformation incurs an extra mov, due to the neg instruction
   1120     // clobbering its operand.
   1121     if (!RHS.getNode()->hasOneUse() ||
   1122         RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
   1123         RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
   1124         RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
   1125         (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
   1126          RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
   1127       ++Cost;
   1128     // If the base is a register with multiple uses, this
   1129     // transformation may save a mov.
   1130     if ((AM.BaseType == X86ISelAddressMode::RegBase &&
   1131          AM.Base_Reg.getNode() &&
   1132          !AM.Base_Reg.getNode()->hasOneUse()) ||
   1133         AM.BaseType == X86ISelAddressMode::FrameIndexBase)
   1134       --Cost;
   1135     // If the folded LHS was interesting, this transformation saves
   1136     // address arithmetic.
   1137     if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
   1138         ((AM.Disp != 0) && (Backup.Disp == 0)) +
   1139         (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
   1140       --Cost;
   1141     // If it doesn't look like it may be an overall win, don't do it.
   1142     if (Cost >= 0) {
   1143       AM = Backup;
   1144       break;
   1145     }
   1146 
   1147     // Ok, the transformation is legal and appears profitable. Go for it.
   1148     SDValue Zero = CurDAG->getConstant(0, N.getValueType());
   1149     SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
   1150     AM.IndexReg = Neg;
   1151     AM.Scale = 1;
   1152 
   1153     // Insert the new nodes into the topological ordering.
   1154     InsertDAGNode(*CurDAG, N, Zero);
   1155     InsertDAGNode(*CurDAG, N, Neg);
   1156     return false;
   1157   }
   1158 
   1159   case ISD::ADD: {
   1160     // Add an artificial use to this node so that we can keep track of
   1161     // it if it gets CSE'd with a different node.
   1162     HandleSDNode Handle(N);
   1163 
   1164     X86ISelAddressMode Backup = AM;
   1165     if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
   1166         !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
   1167       return false;
   1168     AM = Backup;
   1169 
   1170     // Try again after commuting the operands.
   1171     if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
   1172         !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
   1173       return false;
   1174     AM = Backup;
   1175 
   1176     // If we couldn't fold both operands into the address at the same time,
   1177     // see if we can just put each operand into a register and fold at least
   1178     // the add.
   1179     if (AM.BaseType == X86ISelAddressMode::RegBase &&
   1180         !AM.Base_Reg.getNode() &&
   1181         !AM.IndexReg.getNode()) {
   1182       N = Handle.getValue();
   1183       AM.Base_Reg = N.getOperand(0);
   1184       AM.IndexReg = N.getOperand(1);
   1185       AM.Scale = 1;
   1186       return false;
   1187     }
   1188     N = Handle.getValue();
   1189     break;
   1190   }
   1191 
   1192   case ISD::OR:
   1193     // Handle "X | C" as "X + C" iff X is known to have C bits clear.
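             // For example, in "(or (shl x, 3), 5)" the shl leaves the low three bits
             // zero, so the OR behaves like an ADD and the 5 can go into the Disp field.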
   1194     if (CurDAG->isBaseWithConstantOffset(N)) {
   1195       X86ISelAddressMode Backup = AM;
   1196       ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));
   1197 
   1198       // Start with the LHS as an addr mode.
   1199       if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
   1200           !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
   1201         return false;
   1202       AM = Backup;
   1203     }
   1204     break;
   1205 
   1206   case ISD::AND: {
   1207     // Perform some heroic transforms on an and of a constant-count shift
   1208     // with a constant to enable use of the scaled offset field.
   1209 
   1210     // Scale must not be used already.
   1211     if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
   1212 
   1213     SDValue Shift = N.getOperand(0);
   1214     if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
   1215     SDValue X = Shift.getOperand(0);
   1216 
   1217     // We only handle up to 64-bit values here as those are what matter for
   1218     // addressing mode optimizations.
   1219     if (X.getValueSizeInBits() > 64) break;
   1220 
   1221     if (!isa<ConstantSDNode>(N.getOperand(1)))
   1222       break;
   1223     uint64_t Mask = N.getConstantOperandVal(1);
   1224 
   1225     // Try to fold the mask and shift into an extract and scale.
   1226     if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
   1227       return false;
   1228 
   1229     // Try to fold the mask and shift directly into the scale.
   1230     if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
   1231       return false;
   1232 
   1233     // Try to swap the mask and shift to place shifts which can be done as
   1234     // a scale on the outside of the mask.
   1235     if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
   1236       return false;
   1237     break;
   1238   }
   1239   }
   1240 
   1241   return MatchAddressBase(N, AM);
   1242 }
   1243 
   1244 /// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
   1245 /// specified addressing mode without any further recursion.
   1246 bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
   1247   // Is the base register already occupied?
   1248   if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
   1249     // If so, check to see if the scale index register is set.
   1250     if (AM.IndexReg.getNode() == 0) {
   1251       AM.IndexReg = N;
   1252       AM.Scale = 1;
   1253       return false;
   1254     }
   1255 
   1256     // Otherwise, we cannot select it.
   1257     return true;
   1258   }
   1259 
    1260   // Default: use the node as the base register.
   1261   AM.BaseType = X86ISelAddressMode::RegBase;
   1262   AM.Base_Reg = N;
   1263   return false;
   1264 }
   1265 
    1266 /// SelectAddr - Returns true if it is able to pattern-match an addressing mode.
    1267 /// The operands that make up the maximal addressing mode it can match are
    1268 /// returned by reference.
   1269 ///
   1270 /// Parent is the parent node of the addr operand that is being matched.  It
   1271 /// is always a load, store, atomic node, or null.  It is only null when
   1272 /// checking memory operands for inline asm nodes.
   1273 bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
   1274                                  SDValue &Scale, SDValue &Index,
   1275                                  SDValue &Disp, SDValue &Segment) {
   1276   X86ISelAddressMode AM;
   1277 
   1278   if (Parent &&
    1279       // These opcodes all take an "addr:$ptr" operand but are not MemSDNodes,
    1280       // and thus don't carry proper address-space info.
   1281       Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
   1282       Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
   1283       Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme
   1284     unsigned AddrSpace =
   1285       cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
   1286     // AddrSpace 256 -> GS, 257 -> FS.
   1287     if (AddrSpace == 256)
   1288       AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
   1289     if (AddrSpace == 257)
   1290       AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
   1291   }
   1292 
   1293   if (MatchAddress(N, AM))
   1294     return false;
   1295 
   1296   EVT VT = N.getValueType();
   1297   if (AM.BaseType == X86ISelAddressMode::RegBase) {
   1298     if (!AM.Base_Reg.getNode())
   1299       AM.Base_Reg = CurDAG->getRegister(0, VT);
   1300   }
   1301 
   1302   if (!AM.IndexReg.getNode())
   1303     AM.IndexReg = CurDAG->getRegister(0, VT);
   1304 
   1305   getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
   1306   return true;
   1307 }
   1308 
   1309 /// SelectScalarSSELoad - Match a scalar SSE load.  In particular, we want to
   1310 /// match a load whose top elements are either undef or zeros.  The load flavor
   1311 /// is derived from the type of N, which is either v4f32 or v2f64.
   1312 ///
   1313 /// We also return:
    1314 ///   PatternNodeWithChain: the matched node that has a chain input and
   1315 ///   output.
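         /// Typically this matches either a plain (scalar_to_vector (load addr))
         /// pattern or a zero-extending (X86ISD::VZEXT_MOVL (scalar_to_vector
         /// (load addr))) pattern.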
   1316 bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
   1317                                           SDValue N, SDValue &Base,
   1318                                           SDValue &Scale, SDValue &Index,
   1319                                           SDValue &Disp, SDValue &Segment,
   1320                                           SDValue &PatternNodeWithChain) {
   1321   if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
   1322     PatternNodeWithChain = N.getOperand(0);
   1323     if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
   1324         PatternNodeWithChain.hasOneUse() &&
   1325         IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
   1326         IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
   1327       LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
   1328       if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
   1329         return false;
   1330       return true;
   1331     }
   1332   }
   1333 
   1334   // Also handle the case where we explicitly require zeros in the top
   1335   // elements.  This is a vector shuffle from the zero vector.
   1336   if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
   1337       // Check to see if the top elements are all zeros (or bitcast of zeros).
   1338       N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
   1339       N.getOperand(0).getNode()->hasOneUse() &&
   1340       ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
   1341       N.getOperand(0).getOperand(0).hasOneUse() &&
   1342       IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
   1343       IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
   1344     // Okay, this is a zero extending load.  Fold it.
   1345     LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
   1346     if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
   1347       return false;
   1348     PatternNodeWithChain = SDValue(LD, 0);
   1349     return true;
   1350   }
   1351   return false;
   1352 }
   1353 
   1354 
    1355 /// SelectLEAAddr - It calls SelectAddr and determines if the maximal addressing
    1356 /// mode it matches can be cost-effectively emitted as an LEA instruction.
   1357 bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
   1358                                     SDValue &Base, SDValue &Scale,
   1359                                     SDValue &Index, SDValue &Disp,
   1360                                     SDValue &Segment) {
   1361   X86ISelAddressMode AM;
   1362 
   1363   // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
   1364   // segments.
   1365   SDValue Copy = AM.Segment;
   1366   SDValue T = CurDAG->getRegister(0, MVT::i32);
   1367   AM.Segment = T;
   1368   if (MatchAddress(N, AM))
   1369     return false;
   1370   assert (T == AM.Segment);
   1371   AM.Segment = Copy;
   1372 
   1373   EVT VT = N.getValueType();
   1374   unsigned Complexity = 0;
   1375   if (AM.BaseType == X86ISelAddressMode::RegBase)
   1376     if (AM.Base_Reg.getNode())
   1377       Complexity = 1;
   1378     else
   1379       AM.Base_Reg = CurDAG->getRegister(0, VT);
   1380   else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
   1381     Complexity = 4;
   1382 
   1383   if (AM.IndexReg.getNode())
   1384     Complexity++;
   1385   else
   1386     AM.IndexReg = CurDAG->getRegister(0, VT);
   1387 
    1388   // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or to
    1389   // use a simple shift.
   1390   if (AM.Scale > 1)
   1391     Complexity++;
   1392 
    1393   // FIXME: We are artificially lowering the criteria for turning ADD %reg, $GA
    1394   // into an LEA. This was determined with some experimentation but is by no
    1395   // means optimal (especially for code size considerations). LEA is nice
    1396   // because of its three-address nature. Tweak the cost function again when we
    1397   // can run convertToThreeAddress() at register allocation time.
   1398   if (AM.hasSymbolicDisplacement()) {
   1399     // For X86-64, we should always use lea to materialize RIP relative
   1400     // addresses.
   1401     if (Subtarget->is64Bit())
   1402       Complexity = 4;
   1403     else
   1404       Complexity += 2;
   1405   }
   1406 
   1407   if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
   1408     Complexity++;
   1409 
   1410   // If it isn't worth using an LEA, reject it.
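           // With a complexity of two or less (e.g. just base+index or base+disp), a
           // plain ADD or MOV is at least as cheap, so forming an LEA buys nothing.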
   1411   if (Complexity <= 2)
   1412     return false;
   1413 
   1414   getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
   1415   return true;
   1416 }
   1417 
   1418 /// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
   1419 bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
   1420                                         SDValue &Scale, SDValue &Index,
   1421                                         SDValue &Disp, SDValue &Segment) {
   1422   assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
   1423   const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
   1424 
   1425   X86ISelAddressMode AM;
   1426   AM.GV = GA->getGlobal();
   1427   AM.Disp += GA->getOffset();
   1428   AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
   1429   AM.SymbolFlags = GA->getTargetFlags();
   1430 
   1431   if (N.getValueType() == MVT::i32) {
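             // The 32-bit TLS sequence computes the address relative to the GOT base,
             // which must live in EBX, so fold EBX in as the index register.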
   1432     AM.Scale = 1;
   1433     AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
   1434   } else {
   1435     AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
   1436   }
   1437 
   1438   getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
   1439   return true;
   1440 }
   1441 
   1442 
   1443 bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
   1444                                   SDValue &Base, SDValue &Scale,
   1445                                   SDValue &Index, SDValue &Disp,
   1446                                   SDValue &Segment) {
   1447   if (!ISD::isNON_EXTLoad(N.getNode()) ||
   1448       !IsProfitableToFold(N, P, P) ||
   1449       !IsLegalToFold(N, P, P, OptLevel))
   1450     return false;
   1451 
   1452   return SelectAddr(N.getNode(),
   1453                     N.getOperand(1), Base, Scale, Index, Disp, Segment);
   1454 }
   1455 
   1456 /// getGlobalBaseReg - Return an SDNode that returns the value of
   1457 /// the global base register. Output instructions required to
   1458 /// initialize the global base register, if necessary.
   1459 ///
   1460 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
   1461   unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
   1462   return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
   1463 }
   1464 
   1465 SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
   1466   SDValue Chain = Node->getOperand(0);
   1467   SDValue In1 = Node->getOperand(1);
   1468   SDValue In2L = Node->getOperand(2);
   1469   SDValue In2H = Node->getOperand(3);
   1470   SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
   1471   if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
   1472     return NULL;
   1473   MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
   1474   MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
   1475   const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
   1476   SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
   1477                                            MVT::i32, MVT::i32, MVT::Other, Ops,
   1478                                            array_lengthof(Ops));
   1479   cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
   1480   return ResNode;
   1481 }
   1482 
   1483 // FIXME: Figure out some way to unify this with the 'or' and other code
   1484 // below.
   1485 SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
   1486   if (Node->hasAnyUseOfValue(0))
   1487     return 0;
   1488 
    1489   // Optimize common patterns for __sync_add_and_fetch and
    1490   // __sync_sub_and_fetch where the result is not used. This allows us
    1491   // to use the "lock" versions of the add, sub, inc, and dec instructions.
    1492   // FIXME: Do not use special instructions; instead add the "lock" prefix to
    1493   // the target node somehow. The extra information will then be transferred
    1494   // to the machine instruction, where it denotes the prefix.
   1495   SDValue Chain = Node->getOperand(0);
   1496   SDValue Ptr = Node->getOperand(1);
   1497   SDValue Val = Node->getOperand(2);
   1498   SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
   1499   if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
   1500     return 0;
   1501 
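           // Classify the operand: +1 and -1 select INC/DEC, other constants that fit
           // in 32 bits select an immediate ADD or SUB (negating negative values), and
           // a (0 - x) operand selects a register SUB of x.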
   1502   bool isInc = false, isDec = false, isSub = false, isCN = false;
   1503   ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
   1504   if (CN && CN->getSExtValue() == (int32_t)CN->getSExtValue()) {
   1505     isCN = true;
   1506     int64_t CNVal = CN->getSExtValue();
   1507     if (CNVal == 1)
   1508       isInc = true;
   1509     else if (CNVal == -1)
   1510       isDec = true;
   1511     else if (CNVal >= 0)
   1512       Val = CurDAG->getTargetConstant(CNVal, NVT);
   1513     else {
   1514       isSub = true;
   1515       Val = CurDAG->getTargetConstant(-CNVal, NVT);
   1516     }
   1517   } else if (Val.hasOneUse() &&
   1518              Val.getOpcode() == ISD::SUB &&
   1519              X86::isZeroNode(Val.getOperand(0))) {
   1520     isSub = true;
   1521     Val = Val.getOperand(1);
   1522   }
   1523 
   1524   DebugLoc dl = Node->getDebugLoc();
   1525   unsigned Opc = 0;
   1526   switch (NVT.getSimpleVT().SimpleTy) {
   1527   default: return 0;
   1528   case MVT::i8:
   1529     if (isInc)
   1530       Opc = X86::LOCK_INC8m;
   1531     else if (isDec)
   1532       Opc = X86::LOCK_DEC8m;
   1533     else if (isSub) {
   1534       if (isCN)
   1535         Opc = X86::LOCK_SUB8mi;
   1536       else
   1537         Opc = X86::LOCK_SUB8mr;
   1538     } else {
   1539       if (isCN)
   1540         Opc = X86::LOCK_ADD8mi;
   1541       else
   1542         Opc = X86::LOCK_ADD8mr;
   1543     }
   1544     break;
   1545   case MVT::i16:
   1546     if (isInc)
   1547       Opc = X86::LOCK_INC16m;
   1548     else if (isDec)
   1549       Opc = X86::LOCK_DEC16m;
   1550     else if (isSub) {
   1551       if (isCN) {
   1552         if (immSext8(Val.getNode()))
   1553           Opc = X86::LOCK_SUB16mi8;
   1554         else
   1555           Opc = X86::LOCK_SUB16mi;
   1556       } else
   1557         Opc = X86::LOCK_SUB16mr;
   1558     } else {
   1559       if (isCN) {
   1560         if (immSext8(Val.getNode()))
   1561           Opc = X86::LOCK_ADD16mi8;
   1562         else
   1563           Opc = X86::LOCK_ADD16mi;
   1564       } else
   1565         Opc = X86::LOCK_ADD16mr;
   1566     }
   1567     break;
   1568   case MVT::i32:
   1569     if (isInc)
   1570       Opc = X86::LOCK_INC32m;
   1571     else if (isDec)
   1572       Opc = X86::LOCK_DEC32m;
   1573     else if (isSub) {
   1574       if (isCN) {
   1575         if (immSext8(Val.getNode()))
   1576           Opc = X86::LOCK_SUB32mi8;
   1577         else
   1578           Opc = X86::LOCK_SUB32mi;
   1579       } else
   1580         Opc = X86::LOCK_SUB32mr;
   1581     } else {
   1582       if (isCN) {
   1583         if (immSext8(Val.getNode()))
   1584           Opc = X86::LOCK_ADD32mi8;
   1585         else
   1586           Opc = X86::LOCK_ADD32mi;
   1587       } else
   1588         Opc = X86::LOCK_ADD32mr;
   1589     }
   1590     break;
   1591   case MVT::i64:
   1592     if (isInc)
   1593       Opc = X86::LOCK_INC64m;
   1594     else if (isDec)
   1595       Opc = X86::LOCK_DEC64m;
   1596     else if (isSub) {
   1597       Opc = X86::LOCK_SUB64mr;
   1598       if (isCN) {
   1599         if (immSext8(Val.getNode()))
   1600           Opc = X86::LOCK_SUB64mi8;
   1601         else if (i64immSExt32(Val.getNode()))
   1602           Opc = X86::LOCK_SUB64mi32;
   1603       }
   1604     } else {
   1605       Opc = X86::LOCK_ADD64mr;
   1606       if (isCN) {
   1607         if (immSext8(Val.getNode()))
   1608           Opc = X86::LOCK_ADD64mi8;
   1609         else if (i64immSExt32(Val.getNode()))
   1610           Opc = X86::LOCK_ADD64mi32;
   1611       }
   1612     }
   1613     break;
   1614   }
   1615 
   1616   SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
   1617                                                  dl, NVT), 0);
   1618   MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
   1619   MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
   1620   if (isInc || isDec) {
   1621     SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
   1622     SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0);
   1623     cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
   1624     SDValue RetVals[] = { Undef, Ret };
   1625     return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
   1626   } else {
   1627     SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
   1628     SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
   1629     cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
   1630     SDValue RetVals[] = { Undef, Ret };
   1631     return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
   1632   }
   1633 }
   1634 
   1635 enum AtomicOpc {
   1636   OR,
   1637   AND,
   1638   XOR,
   1639   AtomicOpcEnd
   1640 };
   1641 
   1642 enum AtomicSz {
   1643   ConstantI8,
   1644   I8,
   1645   SextConstantI16,
   1646   ConstantI16,
   1647   I16,
   1648   SextConstantI32,
   1649   ConstantI32,
   1650   I32,
   1651   SextConstantI64,
   1652   ConstantI64,
   1653   I64,
   1654   AtomicSzEnd
   1655 };
   1656 
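         // LOCK-prefixed memory opcodes, indexed as AtomicOpcTbl[AtomicOpc][AtomicSz].
         // Each row covers one operation in the AtomicSz order above, from the 8-bit
         // immediate form up to the 64-bit register form.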
   1657 static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
   1658   {
   1659     X86::LOCK_OR8mi,
   1660     X86::LOCK_OR8mr,
   1661     X86::LOCK_OR16mi8,
   1662     X86::LOCK_OR16mi,
   1663     X86::LOCK_OR16mr,
   1664     X86::LOCK_OR32mi8,
   1665     X86::LOCK_OR32mi,
   1666     X86::LOCK_OR32mr,
   1667     X86::LOCK_OR64mi8,
   1668     X86::LOCK_OR64mi32,
   1669     X86::LOCK_OR64mr
   1670   },
   1671   {
   1672     X86::LOCK_AND8mi,
   1673     X86::LOCK_AND8mr,
   1674     X86::LOCK_AND16mi8,
   1675     X86::LOCK_AND16mi,
   1676     X86::LOCK_AND16mr,
   1677     X86::LOCK_AND32mi8,
   1678     X86::LOCK_AND32mi,
   1679     X86::LOCK_AND32mr,
   1680     X86::LOCK_AND64mi8,
   1681     X86::LOCK_AND64mi32,
   1682     X86::LOCK_AND64mr
   1683   },
   1684   {
   1685     X86::LOCK_XOR8mi,
   1686     X86::LOCK_XOR8mr,
   1687     X86::LOCK_XOR16mi8,
   1688     X86::LOCK_XOR16mi,
   1689     X86::LOCK_XOR16mr,
   1690     X86::LOCK_XOR32mi8,
   1691     X86::LOCK_XOR32mi,
   1692     X86::LOCK_XOR32mr,
   1693     X86::LOCK_XOR64mi8,
   1694     X86::LOCK_XOR64mi32,
   1695     X86::LOCK_XOR64mr
   1696   }
   1697 };
   1698 
   1699 SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
   1700   if (Node->hasAnyUseOfValue(0))
   1701     return 0;
   1702 
   1703   // Optimize common patterns for __sync_or_and_fetch and similar arith
   1704   // operations where the result is not used. This allows us to use the "lock"
   1705   // version of the arithmetic instruction.
    1706   // FIXME: As with 'add' and 'sub' above, try to merge that handling in here.
   1707   SDValue Chain = Node->getOperand(0);
   1708   SDValue Ptr = Node->getOperand(1);
   1709   SDValue Val = Node->getOperand(2);
   1710   SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
   1711   if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
   1712     return 0;
   1713 
    1714   // Determine which index into the table to use.
   1715   enum AtomicOpc Op;
   1716   switch (Node->getOpcode()) {
   1717     case ISD::ATOMIC_LOAD_OR:
   1718       Op = OR;
   1719       break;
   1720     case ISD::ATOMIC_LOAD_AND:
   1721       Op = AND;
   1722       break;
   1723     case ISD::ATOMIC_LOAD_XOR:
   1724       Op = XOR;
   1725       break;
   1726     default:
   1727       return 0;
   1728   }
   1729 
   1730   bool isCN = false;
   1731   ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
   1732   if (CN && (int32_t)CN->getSExtValue() == CN->getSExtValue()) {
   1733     isCN = true;
   1734     Val = CurDAG->getTargetConstant(CN->getSExtValue(), NVT);
   1735   }
   1736 
   1737   unsigned Opc = 0;
   1738   switch (NVT.getSimpleVT().SimpleTy) {
   1739     default: return 0;
   1740     case MVT::i8:
   1741       if (isCN)
   1742         Opc = AtomicOpcTbl[Op][ConstantI8];
   1743       else
   1744         Opc = AtomicOpcTbl[Op][I8];
   1745       break;
   1746     case MVT::i16:
   1747       if (isCN) {
   1748         if (immSext8(Val.getNode()))
   1749           Opc = AtomicOpcTbl[Op][SextConstantI16];
   1750         else
   1751           Opc = AtomicOpcTbl[Op][ConstantI16];
   1752       } else
   1753         Opc = AtomicOpcTbl[Op][I16];
   1754       break;
   1755     case MVT::i32:
   1756       if (isCN) {
   1757         if (immSext8(Val.getNode()))
   1758           Opc = AtomicOpcTbl[Op][SextConstantI32];
   1759         else
   1760           Opc = AtomicOpcTbl[Op][ConstantI32];
   1761       } else
   1762         Opc = AtomicOpcTbl[Op][I32];
   1763       break;
   1764     case MVT::i64:
   1765       Opc = AtomicOpcTbl[Op][I64];
   1766       if (isCN) {
   1767         if (immSext8(Val.getNode()))
   1768           Opc = AtomicOpcTbl[Op][SextConstantI64];
   1769         else if (i64immSExt32(Val.getNode()))
   1770           Opc = AtomicOpcTbl[Op][ConstantI64];
   1771       }
   1772       break;
   1773   }
   1774 
   1775   assert(Opc != 0 && "Invalid arith lock transform!");
   1776 
   1777   DebugLoc dl = Node->getDebugLoc();
   1778   SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
   1779                                                  dl, NVT), 0);
   1780   MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
   1781   MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
   1782   SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
   1783   SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
   1784   cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
   1785   SDValue RetVals[] = { Undef, Ret };
   1786   return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
   1787 }
   1788 
   1789 /// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
   1790 /// any uses which require the SF or OF bits to be accurate.
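         /// Callers use this to decide whether a compare can be narrowed (for example,
         /// testl to testb) in cases where narrowing would change how SF and OF are
         /// computed.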
   1791 static bool HasNoSignedComparisonUses(SDNode *N) {
   1792   // Examine each user of the node.
   1793   for (SDNode::use_iterator UI = N->use_begin(),
   1794          UE = N->use_end(); UI != UE; ++UI) {
   1795     // Only examine CopyToReg uses.
   1796     if (UI->getOpcode() != ISD::CopyToReg)
   1797       return false;
   1798     // Only examine CopyToReg uses that copy to EFLAGS.
   1799     if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
   1800           X86::EFLAGS)
   1801       return false;
   1802     // Examine each user of the CopyToReg use.
   1803     for (SDNode::use_iterator FlagUI = UI->use_begin(),
   1804            FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
   1805       // Only examine the Flag result.
   1806       if (FlagUI.getUse().getResNo() != 1) continue;
   1807       // Anything unusual: assume conservatively.
   1808       if (!FlagUI->isMachineOpcode()) return false;
   1809       // Examine the opcode of the user.
   1810       switch (FlagUI->getMachineOpcode()) {
   1811       // These comparisons don't treat the most significant bit specially.
   1812       case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
   1813       case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
   1814       case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
   1815       case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
   1816       case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
   1817       case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
   1818       case X86::CMOVA16rr: case X86::CMOVA16rm:
   1819       case X86::CMOVA32rr: case X86::CMOVA32rm:
   1820       case X86::CMOVA64rr: case X86::CMOVA64rm:
   1821       case X86::CMOVAE16rr: case X86::CMOVAE16rm:
   1822       case X86::CMOVAE32rr: case X86::CMOVAE32rm:
   1823       case X86::CMOVAE64rr: case X86::CMOVAE64rm:
   1824       case X86::CMOVB16rr: case X86::CMOVB16rm:
   1825       case X86::CMOVB32rr: case X86::CMOVB32rm:
   1826       case X86::CMOVB64rr: case X86::CMOVB64rm:
   1827       case X86::CMOVBE16rr: case X86::CMOVBE16rm:
   1828       case X86::CMOVBE32rr: case X86::CMOVBE32rm:
   1829       case X86::CMOVBE64rr: case X86::CMOVBE64rm:
   1830       case X86::CMOVE16rr: case X86::CMOVE16rm:
   1831       case X86::CMOVE32rr: case X86::CMOVE32rm:
   1832       case X86::CMOVE64rr: case X86::CMOVE64rm:
   1833       case X86::CMOVNE16rr: case X86::CMOVNE16rm:
   1834       case X86::CMOVNE32rr: case X86::CMOVNE32rm:
   1835       case X86::CMOVNE64rr: case X86::CMOVNE64rm:
   1836       case X86::CMOVNP16rr: case X86::CMOVNP16rm:
   1837       case X86::CMOVNP32rr: case X86::CMOVNP32rm:
   1838       case X86::CMOVNP64rr: case X86::CMOVNP64rm:
   1839       case X86::CMOVP16rr: case X86::CMOVP16rm:
   1840       case X86::CMOVP32rr: case X86::CMOVP32rm:
   1841       case X86::CMOVP64rr: case X86::CMOVP64rm:
   1842         continue;
   1843       // Anything else: assume conservatively.
   1844       default: return false;
   1845       }
   1846     }
   1847   }
   1848   return true;
   1849 }
   1850 
   1851 /// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
    1852 /// is suitable for the {load; increment or decrement; store} read-modify-write
    1853 /// transformation.
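         /// For example, a (store (X86ISD::INC (load p)), p) chain whose intermediate
         /// values have no other users can be emitted as a single in-memory INC.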
   1854 static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
   1855                                 SDValue StoredVal, SelectionDAG *CurDAG,
   1856                                 LoadSDNode* &LoadNode, SDValue &InputChain) {
   1857 
    1858   // Is the stored value the result of a DEC or INC?
   1859   if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;
   1860 
    1861   // Is the stored value result 0 of the INC/DEC node?
   1862   if (StoredVal.getResNo() != 0) return false;
   1863 
    1864   // Is the store the only user of the INC/DEC result?
   1865   if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
   1866 
    1867   // Is the store non-extending and non-indexed?
   1868   if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
   1869     return false;
   1870 
   1871   SDValue Load = StoredVal->getOperand(0);
    1872   // Is the INC/DEC operand a non-extending, non-indexed load?
   1873   if (!ISD::isNormalLoad(Load.getNode())) return false;
   1874 
   1875   // Return LoadNode by reference.
   1876   LoadNode = cast<LoadSDNode>(Load);
    1877   // Is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
   1878   EVT LdVT = LoadNode->getMemoryVT();
   1879   if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
   1880       LdVT != MVT::i8)
   1881     return false;
   1882 
    1883   // Is the INC/DEC the only user of the loaded value?
   1884   if (!Load.hasOneUse())
   1885     return false;
   1886 
   1887   // Is the address of the store the same as the load?
   1888   if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
   1889       LoadNode->getOffset() != StoreNode->getOffset())
   1890     return false;
   1891 
   1892   // Check if the chain is produced by the load or is a TokenFactor with
   1893   // the load output chain as an operand. Return InputChain by reference.
   1894   SDValue Chain = StoreNode->getChain();
   1895 
   1896   bool ChainCheck = false;
   1897   if (Chain == Load.getValue(1)) {
   1898     ChainCheck = true;
   1899     InputChain = LoadNode->getChain();
   1900   } else if (Chain.getOpcode() == ISD::TokenFactor) {
   1901     SmallVector<SDValue, 4> ChainOps;
   1902     for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
   1903       SDValue Op = Chain.getOperand(i);
   1904       if (Op == Load.getValue(1)) {
   1905         ChainCheck = true;
   1906         continue;
   1907       }
   1908       ChainOps.push_back(Op);
   1909     }
   1910 
   1911     if (ChainCheck)
   1912       // Make a new TokenFactor with all the other input chains except
   1913       // for the load.
   1914       InputChain = CurDAG->getNode(ISD::TokenFactor, Chain.getDebugLoc(),
   1915                                    MVT::Other, &ChainOps[0], ChainOps.size());
   1916   }
   1917   if (!ChainCheck)
   1918     return false;
   1919 
   1920   return true;
   1921 }
   1922 
    1923 /// getFusedLdStOpcode - Get the appropriate X86 opcode for an in-memory
   1924 /// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
   1925 static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
   1926   if (Opc == X86ISD::DEC) {
   1927     if (LdVT == MVT::i64) return X86::DEC64m;
   1928     if (LdVT == MVT::i32) return X86::DEC32m;
   1929     if (LdVT == MVT::i16) return X86::DEC16m;
   1930     if (LdVT == MVT::i8)  return X86::DEC8m;
   1931   } else {
   1932     assert(Opc == X86ISD::INC && "unrecognized opcode");
   1933     if (LdVT == MVT::i64) return X86::INC64m;
   1934     if (LdVT == MVT::i32) return X86::INC32m;
   1935     if (LdVT == MVT::i16) return X86::INC16m;
   1936     if (LdVT == MVT::i8)  return X86::INC8m;
   1937   }
   1938   llvm_unreachable("unrecognized size for LdVT");
   1939 }
   1940 
   1941 SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
   1942   EVT NVT = Node->getValueType(0);
   1943   unsigned Opc, MOpc;
   1944   unsigned Opcode = Node->getOpcode();
   1945   DebugLoc dl = Node->getDebugLoc();
   1946 
   1947   DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
   1948 
   1949   if (Node->isMachineOpcode()) {
   1950     DEBUG(dbgs() << "== ";  Node->dump(CurDAG); dbgs() << '\n');
   1951     return NULL;   // Already selected.
   1952   }
   1953 
   1954   switch (Opcode) {
   1955   default: break;
   1956   case X86ISD::GlobalBaseReg:
   1957     return getGlobalBaseReg();
   1958 
   1959   case X86ISD::ATOMOR64_DAG:
   1960     return SelectAtomic64(Node, X86::ATOMOR6432);
   1961   case X86ISD::ATOMXOR64_DAG:
   1962     return SelectAtomic64(Node, X86::ATOMXOR6432);
   1963   case X86ISD::ATOMADD64_DAG:
   1964     return SelectAtomic64(Node, X86::ATOMADD6432);
   1965   case X86ISD::ATOMSUB64_DAG:
   1966     return SelectAtomic64(Node, X86::ATOMSUB6432);
   1967   case X86ISD::ATOMNAND64_DAG:
   1968     return SelectAtomic64(Node, X86::ATOMNAND6432);
   1969   case X86ISD::ATOMAND64_DAG:
   1970     return SelectAtomic64(Node, X86::ATOMAND6432);
   1971   case X86ISD::ATOMSWAP64_DAG:
   1972     return SelectAtomic64(Node, X86::ATOMSWAP6432);
   1973 
   1974   case ISD::ATOMIC_LOAD_ADD: {
   1975     SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
   1976     if (RetVal)
   1977       return RetVal;
   1978     break;
   1979   }
   1980   case ISD::ATOMIC_LOAD_XOR:
   1981   case ISD::ATOMIC_LOAD_AND:
   1982   case ISD::ATOMIC_LOAD_OR: {
   1983     SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
   1984     if (RetVal)
   1985       return RetVal;
   1986     break;
   1987   }
   1988   case ISD::AND:
   1989   case ISD::OR:
   1990   case ISD::XOR: {
   1991     // For operations of the form (x << C1) op C2, check if we can use a smaller
   1992     // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
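             // For example, (or (shl x, 8), 0x4000) becomes (shl (or x, 0x40), 8),
             // letting the constant use a sign-extended 8-bit immediate.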
   1993     SDValue N0 = Node->getOperand(0);
   1994     SDValue N1 = Node->getOperand(1);
   1995 
   1996     if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
   1997       break;
   1998 
   1999     // i8 is unshrinkable, i16 should be promoted to i32.
   2000     if (NVT != MVT::i32 && NVT != MVT::i64)
   2001       break;
   2002 
   2003     ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
   2004     ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
   2005     if (!Cst || !ShlCst)
   2006       break;
   2007 
   2008     int64_t Val = Cst->getSExtValue();
   2009     uint64_t ShlVal = ShlCst->getZExtValue();
   2010 
   2011     // Make sure that we don't change the operation by removing bits.
    2012     // This only matters for OR and XOR; AND is unaffected.
   2013     if (Opcode != ISD::AND && ((Val >> ShlVal) << ShlVal) != Val)
   2014       break;
   2015 
   2016     unsigned ShlOp, Op = 0;
   2017     EVT CstVT = NVT;
   2018 
   2019     // Check the minimum bitwidth for the new constant.
   2020     // TODO: AND32ri is the same as AND64ri32 with zext imm.
   2021     // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
   2022     // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
   2023     if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
   2024       CstVT = MVT::i8;
   2025     else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
   2026       CstVT = MVT::i32;
   2027 
   2028     // Bail if there is no smaller encoding.
   2029     if (NVT == CstVT)
   2030       break;
   2031 
   2032     switch (NVT.getSimpleVT().SimpleTy) {
   2033     default: llvm_unreachable("Unsupported VT!");
   2034     case MVT::i32:
   2035       assert(CstVT == MVT::i8);
   2036       ShlOp = X86::SHL32ri;
   2037 
   2038       switch (Opcode) {
   2039       case ISD::AND: Op = X86::AND32ri8; break;
   2040       case ISD::OR:  Op =  X86::OR32ri8; break;
   2041       case ISD::XOR: Op = X86::XOR32ri8; break;
   2042       }
   2043       break;
   2044     case MVT::i64:
   2045       assert(CstVT == MVT::i8 || CstVT == MVT::i32);
   2046       ShlOp = X86::SHL64ri;
   2047 
   2048       switch (Opcode) {
   2049       case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
   2050       case ISD::OR:  Op = CstVT==MVT::i8?  X86::OR64ri8 :  X86::OR64ri32; break;
   2051       case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
   2052       }
   2053       break;
   2054     }
   2055 
   2056     // Emit the smaller op and the shift.
   2057     SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
   2058     SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
   2059     return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
   2060                                 getI8Imm(ShlVal));
   2061   }
   2062   case X86ISD::UMUL: {
   2063     SDValue N0 = Node->getOperand(0);
   2064     SDValue N1 = Node->getOperand(1);
   2065 
   2066     unsigned LoReg;
   2067     switch (NVT.getSimpleVT().SimpleTy) {
   2068     default: llvm_unreachable("Unsupported VT!");
   2069     case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r; break;
   2070     case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
   2071     case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
   2072     case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
   2073     }
   2074 
   2075     SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
   2076                                           N0, SDValue()).getValue(1);
   2077 
   2078     SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
   2079     SDValue Ops[] = {N1, InFlag};
   2080     SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2);
   2081 
   2082     ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
   2083     ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
   2084     ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
   2085     return NULL;
   2086   }
   2087 
   2088   case ISD::SMUL_LOHI:
   2089   case ISD::UMUL_LOHI: {
   2090     SDValue N0 = Node->getOperand(0);
   2091     SDValue N1 = Node->getOperand(1);
   2092 
   2093     bool isSigned = Opcode == ISD::SMUL_LOHI;
   2094     if (!isSigned) {
   2095       switch (NVT.getSimpleVT().SimpleTy) {
   2096       default: llvm_unreachable("Unsupported VT!");
   2097       case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
   2098       case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
   2099       case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
   2100       case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
   2101       }
   2102     } else {
   2103       switch (NVT.getSimpleVT().SimpleTy) {
   2104       default: llvm_unreachable("Unsupported VT!");
   2105       case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
   2106       case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
   2107       case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
   2108       case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
   2109       }
   2110     }
   2111 
   2112     unsigned LoReg, HiReg;
   2113     switch (NVT.getSimpleVT().SimpleTy) {
   2114     default: llvm_unreachable("Unsupported VT!");
   2115     case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
   2116     case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
   2117     case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
   2118     case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
   2119     }
   2120 
   2121     SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
   2122     bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    2123     // Multiply is commutative.
   2124     if (!foldedLoad) {
   2125       foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
   2126       if (foldedLoad)
   2127         std::swap(N0, N1);
   2128     }
   2129 
   2130     SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
   2131                                             N0, SDValue()).getValue(1);
   2132 
   2133     if (foldedLoad) {
   2134       SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
   2135                         InFlag };
   2136       SDNode *CNode =
   2137         CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
   2138                                array_lengthof(Ops));
   2139       InFlag = SDValue(CNode, 1);
   2140 
   2141       // Update the chain.
   2142       ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
   2143     } else {
   2144       SDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag);
   2145       InFlag = SDValue(CNode, 0);
   2146     }
   2147 
   2148     // Prevent use of AH in a REX instruction by referencing AX instead.
   2149     if (HiReg == X86::AH && Subtarget->is64Bit() &&
   2150         !SDValue(Node, 1).use_empty()) {
   2151       SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
   2152                                               X86::AX, MVT::i16, InFlag);
   2153       InFlag = Result.getValue(2);
   2154       // Get the low part if needed. Don't use getCopyFromReg for aliasing
   2155       // registers.
   2156       if (!SDValue(Node, 0).use_empty())
    2157         ReplaceUses(SDValue(Node, 0),
   2158           CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
   2159 
   2160       // Shift AX down 8 bits.
   2161       Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
   2162                                               Result,
   2163                                      CurDAG->getTargetConstant(8, MVT::i8)), 0);
   2164       // Then truncate it down to i8.
   2165       ReplaceUses(SDValue(Node, 1),
   2166         CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
   2167     }
   2168     // Copy the low half of the result, if it is needed.
   2169     if (!SDValue(Node, 0).use_empty()) {
   2170       SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
   2171                                                 LoReg, NVT, InFlag);
   2172       InFlag = Result.getValue(2);
   2173       ReplaceUses(SDValue(Node, 0), Result);
   2174       DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
   2175     }
   2176     // Copy the high half of the result, if it is needed.
   2177     if (!SDValue(Node, 1).use_empty()) {
   2178       SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
   2179                                               HiReg, NVT, InFlag);
   2180       InFlag = Result.getValue(2);
   2181       ReplaceUses(SDValue(Node, 1), Result);
   2182       DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
   2183     }
   2184 
   2185     return NULL;
   2186   }
   2187 
   2188   case ISD::SDIVREM:
   2189   case ISD::UDIVREM: {
   2190     SDValue N0 = Node->getOperand(0);
   2191     SDValue N1 = Node->getOperand(1);
   2192 
   2193     bool isSigned = Opcode == ISD::SDIVREM;
   2194     if (!isSigned) {
   2195       switch (NVT.getSimpleVT().SimpleTy) {
   2196       default: llvm_unreachable("Unsupported VT!");
   2197       case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
   2198       case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
   2199       case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
   2200       case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
   2201       }
   2202     } else {
   2203       switch (NVT.getSimpleVT().SimpleTy) {
   2204       default: llvm_unreachable("Unsupported VT!");
   2205       case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
   2206       case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
   2207       case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
   2208       case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
   2209       }
   2210     }
   2211 
   2212     unsigned LoReg, HiReg, ClrReg;
   2213     unsigned ClrOpcode, SExtOpcode;
   2214     switch (NVT.getSimpleVT().SimpleTy) {
   2215     default: llvm_unreachable("Unsupported VT!");
   2216     case MVT::i8:
   2217       LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
   2218       ClrOpcode  = 0;
   2219       SExtOpcode = X86::CBW;
   2220       break;
   2221     case MVT::i16:
   2222       LoReg = X86::AX;  HiReg = X86::DX;
   2223       ClrOpcode  = X86::MOV16r0; ClrReg = X86::DX;
   2224       SExtOpcode = X86::CWD;
   2225       break;
   2226     case MVT::i32:
   2227       LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
   2228       ClrOpcode  = X86::MOV32r0;
   2229       SExtOpcode = X86::CDQ;
   2230       break;
   2231     case MVT::i64:
   2232       LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
   2233       ClrOpcode  = X86::MOV64r0;
   2234       SExtOpcode = X86::CQO;
   2235       break;
   2236     }
   2237 
   2238     SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
   2239     bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
   2240     bool signBitIsZero = CurDAG->SignBitIsZero(N0);
   2241 
   2242     SDValue InFlag;
   2243     if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
   2244       // Special case for div8, just use a move with zero extension to AX to
   2245       // clear the upper 8 bits (AH).
   2246       SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
   2247       if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
   2248         SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
   2249         Move =
   2250           SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
   2251                                          MVT::Other, Ops,
   2252                                          array_lengthof(Ops)), 0);
   2253         Chain = Move.getValue(1);
   2254         ReplaceUses(N0.getValue(1), Chain);
   2255       } else {
   2256         Move =
   2257           SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
   2258         Chain = CurDAG->getEntryNode();
   2259       }
   2260       Chain  = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
   2261       InFlag = Chain.getValue(1);
   2262     } else {
   2263       InFlag =
   2264         CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
   2265                              LoReg, N0, SDValue()).getValue(1);
   2266       if (isSigned && !signBitIsZero) {
   2267         // Sign extend the low part into the high part.
   2268         InFlag =
   2269           SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
   2270       } else {
   2271         // Zero out the high part, effectively zero extending the input.
   2272         SDValue ClrNode =
   2273           SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
   2274         InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
   2275                                       ClrNode, InFlag).getValue(1);
   2276       }
   2277     }
   2278 
   2279     if (foldedLoad) {
   2280       SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
   2281                         InFlag };
   2282       SDNode *CNode =
   2283         CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
   2284                                array_lengthof(Ops));
   2285       InFlag = SDValue(CNode, 1);
   2286       // Update the chain.
   2287       ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
   2288     } else {
   2289       InFlag =
   2290         SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
   2291     }
   2292 
   2293     // Prevent use of AH in a REX instruction by referencing AX instead.
   2294     // Shift it down 8 bits.
   2295     if (HiReg == X86::AH && Subtarget->is64Bit() &&
   2296         !SDValue(Node, 1).use_empty()) {
   2297       SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
   2298                                               X86::AX, MVT::i16, InFlag);
   2299       InFlag = Result.getValue(2);
   2300 
   2301       // If we also need AL (the quotient), get it by extracting a subreg from
   2302       // Result. The fast register allocator does not like multiple CopyFromReg
   2303       // nodes using aliasing registers.
   2304       if (!SDValue(Node, 0).use_empty())
   2305         ReplaceUses(SDValue(Node, 0),
   2306           CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
   2307 
   2308       // Shift AX right by 8 bits instead of using AH.
   2309       Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
   2310                                          Result,
   2311                                          CurDAG->getTargetConstant(8, MVT::i8)),
   2312                        0);
   2313       ReplaceUses(SDValue(Node, 1),
   2314         CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
   2315     }
   2316     // Copy the division (low) result, if it is needed.
   2317     if (!SDValue(Node, 0).use_empty()) {
   2318       SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
   2319                                                 LoReg, NVT, InFlag);
   2320       InFlag = Result.getValue(2);
   2321       ReplaceUses(SDValue(Node, 0), Result);
   2322       DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
   2323     }
   2324     // Copy the remainder (high) result, if it is needed.
   2325     if (!SDValue(Node, 1).use_empty()) {
   2326       SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
   2327                                               HiReg, NVT, InFlag);
   2328       InFlag = Result.getValue(2);
   2329       ReplaceUses(SDValue(Node, 1), Result);
   2330       DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
   2331     }
   2332     return NULL;
   2333   }
   2334 
   2335   case X86ISD::CMP: {
   2336     SDValue N0 = Node->getOperand(0);
   2337     SDValue N1 = Node->getOperand(1);
   2338 
   2339     // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
   2340     // use a smaller encoding.
   2341     if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
   2342         HasNoSignedComparisonUses(Node))
   2343       // Look past the truncate if CMP is the only use of it.
   2344       N0 = N0.getOperand(0);
   2345     if ((N0.getNode()->getOpcode() == ISD::AND ||
   2346          (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
   2347         N0.getNode()->hasOneUse() &&
   2348         N0.getValueType() != MVT::i8 &&
   2349         X86::isZeroNode(N1)) {
   2350       ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
   2351       if (!C) break;
   2352 
   2353       // For example, convert "testl %eax, $8" to "testb %al, $8"
   2354       if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
   2355           (!(C->getZExtValue() & 0x80) ||
   2356            HasNoSignedComparisonUses(Node))) {
   2357         SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
   2358         SDValue Reg = N0.getNode()->getOperand(0);
   2359 
   2360         // On x86-32, only the ABCD registers have 8-bit subregisters.
   2361         if (!Subtarget->is64Bit()) {
   2362           const TargetRegisterClass *TRC;
   2363           switch (N0.getValueType().getSimpleVT().SimpleTy) {
   2364           case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
   2365           case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
   2366           default: llvm_unreachable("Unsupported TEST operand type!");
   2367           }
   2368           SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
   2369           Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
   2370                                                Reg.getValueType(), Reg, RC), 0);
   2371         }
   2372 
   2373         // Extract the l-register.
   2374         SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
   2375                                                         MVT::i8, Reg);
   2376 
   2377         // Emit a testb.
   2378         return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
   2379       }
   2380 
   2381       // For example, "testl %eax, $2048" to "testb %ah, $8".
   2382       if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
   2383           (!(C->getZExtValue() & 0x8000) ||
   2384            HasNoSignedComparisonUses(Node))) {
   2385         // Shift the immediate right by 8 bits.
   2386         SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
   2387                                                        MVT::i8);
   2388         SDValue Reg = N0.getNode()->getOperand(0);
   2389 
   2390         // Put the value in an ABCD register.
   2391         const TargetRegisterClass *TRC;
   2392         switch (N0.getValueType().getSimpleVT().SimpleTy) {
   2393         case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
   2394         case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
   2395         case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
   2396         default: llvm_unreachable("Unsupported TEST operand type!");
   2397         }
   2398         SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
   2399         Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
   2400                                              Reg.getValueType(), Reg, RC), 0);
   2401 
   2402         // Extract the h-register.
   2403         SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
   2404                                                         MVT::i8, Reg);
   2405 
   2406         // Emit a testb.  The EXTRACT_SUBREG becomes a COPY that can only
   2407         // target GR8_NOREX registers, so make sure the register class is
   2408         // forced.
   2409         return CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl, MVT::i32,
   2410                                       Subreg, ShiftedImm);
   2411       }
   2412 
   2413       // For example, "testl %eax, $32776" to "testw %ax, $32776".
   2414       if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
   2415           N0.getValueType() != MVT::i16 &&
   2416           (!(C->getZExtValue() & 0x8000) ||
   2417            HasNoSignedComparisonUses(Node))) {
   2418         SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
   2419         SDValue Reg = N0.getNode()->getOperand(0);
   2420 
   2421         // Extract the 16-bit subregister.
   2422         SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
   2423                                                         MVT::i16, Reg);
   2424 
   2425         // Emit a testw.
   2426         return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg, Imm);
   2427       }
   2428 
   2429       // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
   2430       if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
   2431           N0.getValueType() == MVT::i64 &&
   2432           (!(C->getZExtValue() & 0x80000000) ||
   2433            HasNoSignedComparisonUses(Node))) {
   2434         SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
   2435         SDValue Reg = N0.getNode()->getOperand(0);
   2436 
   2437         // Extract the 32-bit subregister.
   2438         SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
   2439                                                         MVT::i32, Reg);
   2440 
   2441         // Emit a testl.
   2442         return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg, Imm);
   2443       }
   2444     }
   2445     break;
   2446   }
   2447   case ISD::STORE: {
   2448     // Change a chain of {load; incr or dec; store} of the same value into
   2449     // a simple increment or decrement through memory of that value, if the
   2450     // uses of the modified value and its address are suitable.
   2451     // The DEC64m tablegen pattern is currently not able to match the case where
   2452     // the EFLAGS on the original DEC are used. (This also applies to
   2453     // {INC,DEC}X{64,32,16,8}.)
   2454     // We'll need to improve tablegen to allow flags to be transferred from a
    2455     // node in the pattern to the result node, probably with a new keyword.
    2456     // For example, we currently have this:
   2457     // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
   2458     //  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
   2459     //   (implicit EFLAGS)]>;
    2460     // but we may need something like this:
   2461     // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
   2462     //  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
   2463     //   (transferrable EFLAGS)]>;
   2464 
   2465     StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
   2466     SDValue StoredVal = StoreNode->getOperand(1);
   2467     unsigned Opc = StoredVal->getOpcode();
   2468 
   2469     LoadSDNode *LoadNode = 0;
   2470     SDValue InputChain;
   2471     if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
   2472                              LoadNode, InputChain))
   2473       break;
   2474 
   2475     SDValue Base, Scale, Index, Disp, Segment;
   2476     if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
   2477                     Base, Scale, Index, Disp, Segment))
   2478       break;
   2479 
   2480     MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
   2481     MemOp[0] = StoreNode->getMemOperand();
   2482     MemOp[1] = LoadNode->getMemOperand();
   2483     const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
   2484     EVT LdVT = LoadNode->getMemoryVT();
   2485     unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
   2486     MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
   2487                                                    Node->getDebugLoc(),
   2488                                                    MVT::i32, MVT::Other, Ops,
   2489                                                    array_lengthof(Ops));
   2490     Result->setMemRefs(MemOp, MemOp + 2);
   2491 
   2492     ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
   2493     ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
   2494 
   2495     return Result;
   2496   }
   2497   }
   2498 
   2499   SDNode *ResNode = SelectCode(Node);
   2500 
   2501   DEBUG(dbgs() << "=> ";
   2502         if (ResNode == NULL || ResNode == Node)
   2503           Node->dump(CurDAG);
   2504         else
   2505           ResNode->dump(CurDAG);
   2506         dbgs() << '\n');
   2507 
   2508   return ResNode;
   2509 }
   2510 
   2511 bool X86DAGToDAGISel::
   2512 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
   2513                              std::vector<SDValue> &OutOps) {
   2514   SDValue Op0, Op1, Op2, Op3, Op4;
   2515   switch (ConstraintCode) {
   2516   case 'o':   // offsetable        ??
   2517   case 'v':   // not offsetable    ??
   2518   default: return true;
   2519   case 'm':   // memory
   2520     if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
   2521       return true;
   2522     break;
   2523   }
   2524 
   2525   OutOps.push_back(Op0);
   2526   OutOps.push_back(Op1);
   2527   OutOps.push_back(Op2);
   2528   OutOps.push_back(Op3);
   2529   OutOps.push_back(Op4);
   2530   return false;
   2531 }
   2532 
   2533 /// createX86ISelDag - This pass converts a legalized DAG into a
   2534 /// X86-specific DAG, ready for instruction scheduling.
   2535 ///
   2536 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
   2537                                      CodeGenOpt::Level OptLevel) {
   2538   return new X86DAGToDAGISel(TM, OptLevel);
   2539 }
   2540