      1 //===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This implements routines for translating from LLVM IR into SelectionDAG IR.
     11 //
     12 //===----------------------------------------------------------------------===//
     13 
     14 #define DEBUG_TYPE "isel"
     15 #include "SelectionDAGBuilder.h"
     16 #include "SDNodeDbgValue.h"
     17 #include "llvm/ADT/BitVector.h"
     18 #include "llvm/ADT/Optional.h"
     19 #include "llvm/ADT/SmallSet.h"
     20 #include "llvm/Analysis/AliasAnalysis.h"
     21 #include "llvm/Analysis/BranchProbabilityInfo.h"
     22 #include "llvm/Analysis/ConstantFolding.h"
     23 #include "llvm/Analysis/ValueTracking.h"
     24 #include "llvm/CodeGen/Analysis.h"
     25 #include "llvm/CodeGen/FastISel.h"
     26 #include "llvm/CodeGen/FunctionLoweringInfo.h"
     27 #include "llvm/CodeGen/GCMetadata.h"
     28 #include "llvm/CodeGen/GCStrategy.h"
     29 #include "llvm/CodeGen/MachineFrameInfo.h"
     30 #include "llvm/CodeGen/MachineFunction.h"
     31 #include "llvm/CodeGen/MachineInstrBuilder.h"
     32 #include "llvm/CodeGen/MachineJumpTableInfo.h"
     33 #include "llvm/CodeGen/MachineModuleInfo.h"
     34 #include "llvm/CodeGen/MachineRegisterInfo.h"
     35 #include "llvm/CodeGen/SelectionDAG.h"
     36 #include "llvm/DebugInfo.h"
     37 #include "llvm/IR/CallingConv.h"
     38 #include "llvm/IR/Constants.h"
     39 #include "llvm/IR/DataLayout.h"
     40 #include "llvm/IR/DerivedTypes.h"
     41 #include "llvm/IR/Function.h"
     42 #include "llvm/IR/GlobalVariable.h"
     43 #include "llvm/IR/InlineAsm.h"
     44 #include "llvm/IR/Instructions.h"
     45 #include "llvm/IR/IntrinsicInst.h"
     46 #include "llvm/IR/Intrinsics.h"
     47 #include "llvm/IR/LLVMContext.h"
     48 #include "llvm/IR/Module.h"
     49 #include "llvm/Support/CommandLine.h"
     50 #include "llvm/Support/Debug.h"
     51 #include "llvm/Support/ErrorHandling.h"
     52 #include "llvm/Support/IntegersSubsetMapping.h"
     53 #include "llvm/Support/MathExtras.h"
     54 #include "llvm/Support/raw_ostream.h"
     55 #include "llvm/Target/TargetFrameLowering.h"
     56 #include "llvm/Target/TargetInstrInfo.h"
     57 #include "llvm/Target/TargetIntrinsicInfo.h"
     58 #include "llvm/Target/TargetLibraryInfo.h"
     59 #include "llvm/Target/TargetLowering.h"
     60 #include "llvm/Target/TargetOptions.h"
     61 #include <algorithm>
     62 using namespace llvm;
     63 
     64 /// LimitFloatPrecision - Generate low-precision inline sequences for
     65 /// some float libcalls (6, 8 or 12 bits).
     66 static unsigned LimitFloatPrecision;
     67 
     68 static cl::opt<unsigned, true>
     69 LimitFPPrecision("limit-float-precision",
     70                  cl::desc("Generate low-precision inline sequences "
     71                           "for some float libcalls"),
     72                  cl::location(LimitFloatPrecision),
     73                  cl::init(0));
     74 
     75 // Limit the width of DAG chains. This is important in general to prevent
      76 // DAG-based analysis from blowing up. For example, alias analysis and
     77 // load clustering may not complete in reasonable time. It is difficult to
     78 // recognize and avoid this situation within each individual analysis, and
     79 // future analyses are likely to have the same behavior. Limiting DAG width is
     80 // the safe approach, and will be especially important with global DAGs.
     81 //
     82 // MaxParallelChains default is arbitrarily high to avoid affecting
     83 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
     84 // sequence over this should have been converted to llvm.memcpy by the
      85 // frontend. It is easy to induce this behavior with .ll code such as:
     86 // %buffer = alloca [4096 x i8]
     87 // %data = load [4096 x i8]* %argPtr
     88 // store [4096 x i8] %data, [4096 x i8]* %buffer
     89 static const unsigned MaxParallelChains = 64;
     90 
     91 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
     92                                       const SDValue *Parts, unsigned NumParts,
     93                                       MVT PartVT, EVT ValueVT, const Value *V);
     94 
     95 /// getCopyFromParts - Create a value that contains the specified legal parts
     96 /// combined into the value they represent.  If the parts combine to a type
      97 /// larger than ValueVT, then AssertOp can be used to specify whether the extra
     98 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
     99 /// (ISD::AssertSext).
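         /// For example, an i64 value held in two legal i32 parts is reassembled
         /// with a single BUILD_PAIR; a trailing non-power-of-2 tail is merged back
         /// in with an extend, shift and OR.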
    100 static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
    101                                 const SDValue *Parts,
    102                                 unsigned NumParts, MVT PartVT, EVT ValueVT,
    103                                 const Value *V,
    104                                 ISD::NodeType AssertOp = ISD::DELETED_NODE) {
    105   if (ValueVT.isVector())
    106     return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
    107                                   PartVT, ValueVT, V);
    108 
    109   assert(NumParts > 0 && "No parts to assemble!");
    110   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    111   SDValue Val = Parts[0];
    112 
    113   if (NumParts > 1) {
    114     // Assemble the value from multiple parts.
    115     if (ValueVT.isInteger()) {
    116       unsigned PartBits = PartVT.getSizeInBits();
    117       unsigned ValueBits = ValueVT.getSizeInBits();
    118 
    119       // Assemble the power of 2 part.
    120       unsigned RoundParts = NumParts & (NumParts - 1) ?
    121         1 << Log2_32(NumParts) : NumParts;
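               // (NumParts & (NumParts - 1)) is nonzero exactly when NumParts is
               // not a power of 2; in that case round down to the largest power of
               // 2 (e.g. NumParts == 3 gives RoundParts == 2) and assemble the
               // leftover odd part separately below.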
    122       unsigned RoundBits = PartBits * RoundParts;
    123       EVT RoundVT = RoundBits == ValueBits ?
    124         ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
    125       SDValue Lo, Hi;
    126 
    127       EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
    128 
    129       if (RoundParts > 2) {
    130         Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
    131                               PartVT, HalfVT, V);
    132         Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
    133                               RoundParts / 2, PartVT, HalfVT, V);
    134       } else {
    135         Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
    136         Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
    137       }
    138 
    139       if (TLI.isBigEndian())
    140         std::swap(Lo, Hi);
    141 
    142       Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
    143 
    144       if (RoundParts < NumParts) {
    145         // Assemble the trailing non-power-of-2 part.
    146         unsigned OddParts = NumParts - RoundParts;
    147         EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
    148         Hi = getCopyFromParts(DAG, DL,
    149                               Parts + RoundParts, OddParts, PartVT, OddVT, V);
    150 
    151         // Combine the round and odd parts.
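                 // The piece that lands in the high bits is any-extended to the
                 // full width, shifted left by the size of the low piece, and ORed
                 // in (e.g. little-endian i96 from 3 x i32: odd part << 64 | round
                 // part).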
    152         Lo = Val;
    153         if (TLI.isBigEndian())
    154           std::swap(Lo, Hi);
    155         EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    156         Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
    157         Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
    158                          DAG.getConstant(Lo.getValueType().getSizeInBits(),
    159                                          TLI.getPointerTy()));
    160         Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
    161         Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
    162       }
    163     } else if (PartVT.isFloatingPoint()) {
    164       // FP split into multiple FP parts (for ppcf128)
    165       assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
    166              "Unexpected split");
    167       SDValue Lo, Hi;
    168       Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
    169       Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
    170       if (TLI.isBigEndian())
    171         std::swap(Lo, Hi);
    172       Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    173     } else {
    174       // FP split into integer parts (soft fp)
    175       assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
    176              !PartVT.isVector() && "Unexpected split");
    177       EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    178       Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
    179     }
    180   }
    181 
    182   // There is now one part, held in Val.  Correct it to match ValueVT.
    183   EVT PartEVT = Val.getValueType();
    184 
    185   if (PartEVT == ValueVT)
    186     return Val;
    187 
    188   if (PartEVT.isInteger() && ValueVT.isInteger()) {
    189     if (ValueVT.bitsLT(PartEVT)) {
    190       // For a truncate, see if we have any information to
    191       // indicate whether the truncated bits will always be
     192       // zero or sign-extended.
    193       if (AssertOp != ISD::DELETED_NODE)
    194         Val = DAG.getNode(AssertOp, DL, PartEVT, Val,
    195                           DAG.getValueType(ValueVT));
    196       return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    197     }
    198     return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
    199   }
    200 
    201   if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    202     // FP_ROUND's are always exact here.
    203     if (ValueVT.bitsLT(Val.getValueType()))
    204       return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val,
    205                          DAG.getTargetConstant(1, TLI.getPointerTy()));
    206 
    207     return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
    208   }
    209 
    210   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    211     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    212 
    213   llvm_unreachable("Unknown mismatch!");
    214 }
    215 
    216 /// getCopyFromPartsVector - Create a value that contains the specified legal
    217 /// parts combined into the value they represent.  If the parts combine to a
     218 /// type larger than ValueVT, then AssertOp can be used to specify whether the
    219 /// extra bits are known to be zero (ISD::AssertZext) or sign extended from
    220 /// ValueVT (ISD::AssertSext).
    221 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
    222                                       const SDValue *Parts, unsigned NumParts,
    223                                       MVT PartVT, EVT ValueVT, const Value *V) {
    224   assert(ValueVT.isVector() && "Not a vector value");
    225   assert(NumParts > 0 && "No parts to assemble!");
    226   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    227   SDValue Val = Parts[0];
    228 
    229   // Handle a multi-element vector.
    230   if (NumParts > 1) {
    231     EVT IntermediateVT;
    232     MVT RegisterVT;
    233     unsigned NumIntermediates;
    234     unsigned NumRegs =
    235     TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
    236                                NumIntermediates, RegisterVT);
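             // getVectorTypeBreakdown reports how many RegisterVT registers the
             // target uses for ValueVT and which intermediate type they are
             // grouped through.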
    237     assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    238     NumParts = NumRegs; // Silence a compiler warning.
    239     assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    240     assert(RegisterVT == Parts[0].getSimpleValueType() &&
    241            "Part type doesn't match part!");
    242 
    243     // Assemble the parts into intermediate operands.
    244     SmallVector<SDValue, 8> Ops(NumIntermediates);
    245     if (NumIntermediates == NumParts) {
    246       // If the register was not expanded, truncate or copy the value,
    247       // as appropriate.
    248       for (unsigned i = 0; i != NumParts; ++i)
    249         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
    250                                   PartVT, IntermediateVT, V);
    251     } else if (NumParts > 0) {
    252       // If the intermediate type was expanded, build the intermediate
    253       // operands from the parts.
    254       assert(NumParts % NumIntermediates == 0 &&
    255              "Must expand into a divisible number of parts!");
    256       unsigned Factor = NumParts / NumIntermediates;
    257       for (unsigned i = 0; i != NumIntermediates; ++i)
    258         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
    259                                   PartVT, IntermediateVT, V);
    260     }
    261 
    262     // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    263     // intermediate operands.
    264     Val = DAG.getNode(IntermediateVT.isVector() ?
    265                       ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, DL,
    266                       ValueVT, &Ops[0], NumIntermediates);
    267   }
    268 
    269   // There is now one part, held in Val.  Correct it to match ValueVT.
    270   EVT PartEVT = Val.getValueType();
    271 
    272   if (PartEVT == ValueVT)
    273     return Val;
    274 
    275   if (PartEVT.isVector()) {
    276     // If the element type of the source/dest vectors are the same, but the
    277     // parts vector has more elements than the value vector, then we have a
    278     // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    279     // elements we want.
    280     if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
    281       assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
    282              "Cannot narrow, it would be a lossy transformation");
    283       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
    284                          DAG.getConstant(0, TLI.getVectorIdxTy()));
    285     }
    286 
    287     // Vector/Vector bitcast.
    288     if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
    289       return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    290 
    291     assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
    292       "Cannot handle this kind of promotion");
    293     // Promoted vector extract
    294     bool Smaller = ValueVT.bitsLE(PartEVT);
    295     return DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
    296                        DL, ValueVT, Val);
    297 
    298   }
    299 
    300   // Trivial bitcast if the types are the same size and the destination
    301   // vector type is legal.
    302   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
    303       TLI.isTypeLegal(ValueVT))
    304     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    305 
    306   // Handle cases such as i8 -> <1 x i1>
    307   if (ValueVT.getVectorNumElements() != 1) {
    308     LLVMContext &Ctx = *DAG.getContext();
    309     Twine ErrMsg("non-trivial scalar-to-vector conversion");
    310     if (const Instruction *I = dyn_cast_or_null<Instruction>(V)) {
    311       if (const CallInst *CI = dyn_cast<CallInst>(I))
    312         if (isa<InlineAsm>(CI->getCalledValue()))
    313           ErrMsg = ErrMsg + ", possible invalid constraint for vector type";
    314       Ctx.emitError(I, ErrMsg);
    315     } else {
    316       Ctx.emitError(ErrMsg);
    317     }
    318     return DAG.getUNDEF(ValueVT);
    319   }
    320 
    321   if (ValueVT.getVectorNumElements() == 1 &&
    322       ValueVT.getVectorElementType() != PartEVT) {
    323     bool Smaller = ValueVT.bitsLE(PartEVT);
    324     Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
    325                        DL, ValueVT.getScalarType(), Val);
    326   }
    327 
    328   return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
    329 }
    330 
    331 static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc dl,
    332                                  SDValue Val, SDValue *Parts, unsigned NumParts,
    333                                  MVT PartVT, const Value *V);
    334 
    335 /// getCopyToParts - Create a series of nodes that contain the specified value
    336 /// split into legal parts.  If the parts contain more bits than Val, then, for
    337 /// integers, ExtendKind can be used to specify how to generate the extra bits.
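         /// For example, an i64 value copied into two i32 parts is repeatedly
         /// bisected with EXTRACT_ELEMENT; on big-endian targets the parts are then
         /// reversed so that Parts[0] holds the most significant piece.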
    338 static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
    339                            SDValue Val, SDValue *Parts, unsigned NumParts,
    340                            MVT PartVT, const Value *V,
    341                            ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
    342   EVT ValueVT = Val.getValueType();
    343 
    344   // Handle the vector case separately.
    345   if (ValueVT.isVector())
    346     return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);
    347 
    348   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    349   unsigned PartBits = PartVT.getSizeInBits();
    350   unsigned OrigNumParts = NumParts;
    351   assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
    352 
    353   if (NumParts == 0)
    354     return;
    355 
    356   assert(!ValueVT.isVector() && "Vector case handled elsewhere");
    357   EVT PartEVT = PartVT;
    358   if (PartEVT == ValueVT) {
    359     assert(NumParts == 1 && "No-op copy with multiple parts!");
    360     Parts[0] = Val;
    361     return;
    362   }
    363 
    364   if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    365     // If the parts cover more bits than the value has, promote the value.
    366     if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    367       assert(NumParts == 1 && "Do not know what to promote to!");
    368       Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    369     } else {
    370       assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
    371              ValueVT.isInteger() &&
    372              "Unknown mismatch!");
    373       ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    374       Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
    375       if (PartVT == MVT::x86mmx)
    376         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    377     }
    378   } else if (PartBits == ValueVT.getSizeInBits()) {
    379     // Different types of the same size.
    380     assert(NumParts == 1 && PartEVT != ValueVT);
    381     Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    382   } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
     383     // If the parts cover fewer bits than the value has, truncate the value.
    384     assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
    385            ValueVT.isInteger() &&
    386            "Unknown mismatch!");
    387     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    388     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    389     if (PartVT == MVT::x86mmx)
    390       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    391   }
    392 
    393   // The value may have changed - recompute ValueVT.
    394   ValueVT = Val.getValueType();
    395   assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
    396          "Failed to tile the value with PartVT!");
    397 
    398   if (NumParts == 1) {
    399     if (PartEVT != ValueVT) {
    400       LLVMContext &Ctx = *DAG.getContext();
    401       Twine ErrMsg("scalar-to-vector conversion failed");
    402       if (const Instruction *I = dyn_cast_or_null<Instruction>(V)) {
    403         if (const CallInst *CI = dyn_cast<CallInst>(I))
    404           if (isa<InlineAsm>(CI->getCalledValue()))
    405             ErrMsg = ErrMsg + ", possible invalid constraint for vector type";
    406         Ctx.emitError(I, ErrMsg);
    407       } else {
    408         Ctx.emitError(ErrMsg);
    409       }
    410     }
    411 
    412     Parts[0] = Val;
    413     return;
    414   }
    415 
    416   // Expand the value into multiple parts.
    417   if (NumParts & (NumParts - 1)) {
    418     // The number of parts is not a power of 2.  Split off and copy the tail.
    419     assert(PartVT.isInteger() && ValueVT.isInteger() &&
    420            "Do not know what to expand to!");
    421     unsigned RoundParts = 1 << Log2_32(NumParts);
    422     unsigned RoundBits = RoundParts * PartBits;
    423     unsigned OddParts = NumParts - RoundParts;
    424     SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
    425                                  DAG.getIntPtrConstant(RoundBits));
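             // OddVal now holds the value's topmost OddParts * PartBits bits in
             // its low bits; copy those into the trailing parts, then shrink Val to
             // the power-of-2-sized remainder and fall through to the bisection
             // loop below.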
    426     getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);
    427 
    428     if (TLI.isBigEndian())
    429       // The odd parts were reversed by getCopyToParts - unreverse them.
    430       std::reverse(Parts + RoundParts, Parts + NumParts);
    431 
    432     NumParts = RoundParts;
    433     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    434     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    435   }
    436 
    437   // The number of parts is a power of 2.  Repeatedly bisect the value using
    438   // EXTRACT_ELEMENT.
    439   Parts[0] = DAG.getNode(ISD::BITCAST, DL,
    440                          EVT::getIntegerVT(*DAG.getContext(),
    441                                            ValueVT.getSizeInBits()),
    442                          Val);
    443 
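           // Each pass of the outer loop halves StepSize; Parts[i] holds a value
           // of width StepSize * PartBits whose low and high halves are peeled off
           // with EXTRACT_ELEMENT into Parts[i] and Parts[i + StepSize/2].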
    444   for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    445     for (unsigned i = 0; i < NumParts; i += StepSize) {
    446       unsigned ThisBits = StepSize * PartBits / 2;
    447       EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
    448       SDValue &Part0 = Parts[i];
    449       SDValue &Part1 = Parts[i+StepSize/2];
    450 
    451       Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
    452                           ThisVT, Part0, DAG.getIntPtrConstant(1));
    453       Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
    454                           ThisVT, Part0, DAG.getIntPtrConstant(0));
    455 
    456       if (ThisBits == PartBits && ThisVT != PartVT) {
    457         Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
    458         Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
    459       }
    460     }
    461   }
    462 
    463   if (TLI.isBigEndian())
    464     std::reverse(Parts, Parts + OrigNumParts);
    465 }
    466 
    467 
    468 /// getCopyToPartsVector - Create a series of nodes that contain the specified
    469 /// value split into legal parts.
    470 static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
    471                                  SDValue Val, SDValue *Parts, unsigned NumParts,
    472                                  MVT PartVT, const Value *V) {
    473   EVT ValueVT = Val.getValueType();
    474   assert(ValueVT.isVector() && "Not a vector");
    475   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    476 
    477   if (NumParts == 1) {
    478     EVT PartEVT = PartVT;
    479     if (PartEVT == ValueVT) {
    480       // Nothing to do.
    481     } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
    482       // Bitconvert vector->vector case.
    483       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    484     } else if (PartVT.isVector() &&
    485                PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
    486                PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
    487       EVT ElementVT = PartVT.getVectorElementType();
    488       // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
    489       // undef elements.
    490       SmallVector<SDValue, 16> Ops;
    491       for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
    492         Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
    493                                   ElementVT, Val, DAG.getConstant(i,
    494                                                   TLI.getVectorIdxTy())));
    495 
    496       for (unsigned i = ValueVT.getVectorNumElements(),
    497            e = PartVT.getVectorNumElements(); i != e; ++i)
    498         Ops.push_back(DAG.getUNDEF(ElementVT));
    499 
    500       Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, &Ops[0], Ops.size());
    501 
    502       // FIXME: Use CONCAT for 2x -> 4x.
    503 
    504       //SDValue UndefElts = DAG.getUNDEF(VectorTy);
    505       //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
    506     } else if (PartVT.isVector() &&
    507                PartEVT.getVectorElementType().bitsGE(
    508                  ValueVT.getVectorElementType()) &&
    509                PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
    510 
    511       // Promoted vector extract
    512       bool Smaller = PartEVT.bitsLE(ValueVT);
    513       Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
    514                         DL, PartVT, Val);
     515     } else {
    516       // Vector -> scalar conversion.
    517       assert(ValueVT.getVectorNumElements() == 1 &&
    518              "Only trivial vector-to-scalar conversions should get here!");
    519       Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
    520                         PartVT, Val, DAG.getConstant(0, TLI.getVectorIdxTy()));
    521 
    522       bool Smaller = ValueVT.bitsLE(PartVT);
    523       Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
    524                          DL, PartVT, Val);
    525     }
    526 
    527     Parts[0] = Val;
    528     return;
    529   }
    530 
    531   // Handle a multi-element vector.
    532   EVT IntermediateVT;
    533   MVT RegisterVT;
    534   unsigned NumIntermediates;
    535   unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
    536                                                 IntermediateVT,
    537                                                 NumIntermediates, RegisterVT);
    538   unsigned NumElements = ValueVT.getVectorNumElements();
    539 
    540   assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    541   NumParts = NumRegs; // Silence a compiler warning.
    542   assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    543 
    544   // Split the vector into intermediate operands.
    545   SmallVector<SDValue, 8> Ops(NumIntermediates);
    546   for (unsigned i = 0; i != NumIntermediates; ++i) {
    547     if (IntermediateVT.isVector())
    548       Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
    549                            IntermediateVT, Val,
    550                    DAG.getConstant(i * (NumElements / NumIntermediates),
    551                                    TLI.getVectorIdxTy()));
    552     else
    553       Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
    554                            IntermediateVT, Val,
    555                            DAG.getConstant(i, TLI.getVectorIdxTy()));
    556   }
    557 
    558   // Split the intermediate operands into legal parts.
    559   if (NumParts == NumIntermediates) {
    560     // If the register was not expanded, promote or copy the value,
    561     // as appropriate.
    562     for (unsigned i = 0; i != NumParts; ++i)
    563       getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
    564   } else if (NumParts > 0) {
     565     // If the intermediate type was expanded, split each intermediate value into
    566     // legal parts.
    567     assert(NumParts % NumIntermediates == 0 &&
    568            "Must expand into a divisible number of parts!");
    569     unsigned Factor = NumParts / NumIntermediates;
    570     for (unsigned i = 0; i != NumIntermediates; ++i)
    571       getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
    572   }
    573 }
    574 
    575 namespace {
    576   /// RegsForValue - This struct represents the registers (physical or virtual)
    577   /// that a particular set of values is assigned, and the type information
    578   /// about the value. The most common situation is to represent one value at a
    579   /// time, but struct or array values are handled element-wise as multiple
    580   /// values.  The splitting of aggregates is performed recursively, so that we
    581   /// never have aggregate-typed registers. The values at this point do not
    582   /// necessarily have legal types, so each value may require one or more
    583   /// registers of some legal type.
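           /// For example, an i64 value on a target whose widest legal integer
           /// register is i32 is described by a single i64 entry in ValueVTs, a
           /// single i32 entry in RegVTs, and two register numbers in Regs.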
    584   ///
    585   struct RegsForValue {
    586     /// ValueVTs - The value types of the values, which may not be legal, and
     587     /// may need to be promoted or synthesized from one or more registers.
    588     ///
    589     SmallVector<EVT, 4> ValueVTs;
    590 
    591     /// RegVTs - The value types of the registers. This is the same size as
    592     /// ValueVTs and it records, for each value, what the type of the assigned
    593     /// register or registers are. (Individual values are never synthesized
    594     /// from more than one type of register.)
    595     ///
     596     /// With virtual registers, the contents of RegVTs are redundant with TLI's
     597     /// getRegisterType member function; with physical registers, however,
    598     /// it is necessary to have a separate record of the types.
    599     ///
    600     SmallVector<MVT, 4> RegVTs;
    601 
    602     /// Regs - This list holds the registers assigned to the values.
    603     /// Each legal or promoted value requires one register, and each
    604     /// expanded value requires multiple registers.
    605     ///
    606     SmallVector<unsigned, 4> Regs;
    607 
    608     RegsForValue() {}
    609 
    610     RegsForValue(const SmallVector<unsigned, 4> &regs,
    611                  MVT regvt, EVT valuevt)
    612       : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
    613 
    614     RegsForValue(LLVMContext &Context, const TargetLowering &tli,
    615                  unsigned Reg, Type *Ty) {
    616       ComputeValueVTs(tli, Ty, ValueVTs);
    617 
    618       for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    619         EVT ValueVT = ValueVTs[Value];
    620         unsigned NumRegs = tli.getNumRegisters(Context, ValueVT);
    621         MVT RegisterVT = tli.getRegisterType(Context, ValueVT);
    622         for (unsigned i = 0; i != NumRegs; ++i)
    623           Regs.push_back(Reg + i);
    624         RegVTs.push_back(RegisterVT);
    625         Reg += NumRegs;
    626       }
    627     }
    628 
    629     /// areValueTypesLegal - Return true if types of all the values are legal.
    630     bool areValueTypesLegal(const TargetLowering &TLI) {
    631       for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    632         MVT RegisterVT = RegVTs[Value];
    633         if (!TLI.isTypeLegal(RegisterVT))
    634           return false;
    635       }
    636       return true;
    637     }
    638 
    639     /// append - Add the specified values to this one.
    640     void append(const RegsForValue &RHS) {
    641       ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    642       RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    643       Regs.append(RHS.Regs.begin(), RHS.Regs.end());
    644     }
    645 
    646     /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
    647     /// this value and returns the result as a ValueVTs value.  This uses
    648     /// Chain/Flag as the input and updates them for the output Chain/Flag.
    649     /// If the Flag pointer is NULL, no flag is used.
    650     SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
    651                             SDLoc dl,
    652                             SDValue &Chain, SDValue *Flag,
    653                             const Value *V = 0) const;
    654 
    655     /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
    656     /// specified value into the registers specified by this object.  This uses
    657     /// Chain/Flag as the input and updates them for the output Chain/Flag.
    658     /// If the Flag pointer is NULL, no flag is used.
    659     void getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
    660                        SDValue &Chain, SDValue *Flag, const Value *V) const;
    661 
    662     /// AddInlineAsmOperands - Add this value to the specified inlineasm node
    663     /// operand list.  This adds the code marker, matching input operand index
    664     /// (if applicable), and includes the number of values added into it.
    665     void AddInlineAsmOperands(unsigned Kind,
    666                               bool HasMatching, unsigned MatchingIdx,
    667                               SelectionDAG &DAG,
    668                               std::vector<SDValue> &Ops) const;
    669   };
    670 }
    671 
    672 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
    673 /// this value and returns the result as a ValueVT value.  This uses
    674 /// Chain/Flag as the input and updates them for the output Chain/Flag.
    675 /// If the Flag pointer is NULL, no flag is used.
    676 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
    677                                       FunctionLoweringInfo &FuncInfo,
    678                                       SDLoc dl,
    679                                       SDValue &Chain, SDValue *Flag,
    680                                       const Value *V) const {
    681   // A Value with type {} or [0 x %t] needs no registers.
    682   if (ValueVTs.empty())
    683     return SDValue();
    684 
    685   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    686 
    687   // Assemble the legal parts into the final values.
    688   SmallVector<SDValue, 4> Values(ValueVTs.size());
    689   SmallVector<SDValue, 8> Parts;
    690   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    691     // Copy the legal parts from the registers.
    692     EVT ValueVT = ValueVTs[Value];
    693     unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    694     MVT RegisterVT = RegVTs[Value];
    695 
    696     Parts.resize(NumRegs);
    697     for (unsigned i = 0; i != NumRegs; ++i) {
    698       SDValue P;
    699       if (Flag == 0) {
    700         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
    701       } else {
    702         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
    703         *Flag = P.getValue(2);
    704       }
    705 
    706       Chain = P.getValue(1);
    707       Parts[i] = P;
    708 
    709       // If the source register was virtual and if we know something about it,
    710       // add an assert node.
    711       if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
    712           !RegisterVT.isInteger() || RegisterVT.isVector())
    713         continue;
    714 
    715       const FunctionLoweringInfo::LiveOutInfo *LOI =
    716         FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
    717       if (!LOI)
    718         continue;
    719 
    720       unsigned RegSize = RegisterVT.getSizeInBits();
    721       unsigned NumSignBits = LOI->NumSignBits;
    722       unsigned NumZeroBits = LOI->KnownZero.countLeadingOnes();
    723 
    724       if (NumZeroBits == RegSize) {
     725         // The current value is zero.
     726         // Express that explicitly so that it is easier for later
     727         // optimizations to kick in.
    728         Parts[i] = DAG.getConstant(0, RegisterVT);
    729         continue;
    730       }
    731 
    732       // FIXME: We capture more information than the dag can represent.  For
    733       // now, just use the tightest assertzext/assertsext possible.
    734       bool isSExt = true;
    735       EVT FromVT(MVT::Other);
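               // Pick the narrowest FromVT the known bits justify: NumSignBits >
               // RegSize-N means the register is a sign-extension from N bits,
               // while NumZeroBits >= RegSize-N means it is a zero-extension from
               // N bits.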
    736       if (NumSignBits == RegSize)
    737         isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
    738       else if (NumZeroBits >= RegSize-1)
    739         isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
    740       else if (NumSignBits > RegSize-8)
    741         isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
    742       else if (NumZeroBits >= RegSize-8)
    743         isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
    744       else if (NumSignBits > RegSize-16)
    745         isSExt = true, FromVT = MVT::i16;  // ASSERT SEXT 16
    746       else if (NumZeroBits >= RegSize-16)
    747         isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
    748       else if (NumSignBits > RegSize-32)
    749         isSExt = true, FromVT = MVT::i32;  // ASSERT SEXT 32
    750       else if (NumZeroBits >= RegSize-32)
    751         isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
    752       else
    753         continue;
    754 
    755       // Add an assertion node.
    756       assert(FromVT != MVT::Other);
    757       Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
    758                              RegisterVT, P, DAG.getValueType(FromVT));
    759     }
    760 
    761     Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
    762                                      NumRegs, RegisterVT, ValueVT, V);
    763     Part += NumRegs;
    764     Parts.clear();
    765   }
    766 
    767   return DAG.getNode(ISD::MERGE_VALUES, dl,
    768                      DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
    769                      &Values[0], ValueVTs.size());
    770 }
    771 
    772 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
    773 /// specified value into the registers specified by this object.  This uses
    774 /// Chain/Flag as the input and updates them for the output Chain/Flag.
    775 /// If the Flag pointer is NULL, no flag is used.
    776 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
    777                                  SDValue &Chain, SDValue *Flag,
    778                                  const Value *V) const {
    779   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    780 
     781   // Get the list of the value's legal parts.
    782   unsigned NumRegs = Regs.size();
    783   SmallVector<SDValue, 8> Parts(NumRegs);
    784   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    785     EVT ValueVT = ValueVTs[Value];
    786     unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    787     MVT RegisterVT = RegVTs[Value];
    788     ISD::NodeType ExtendKind =
    789       TLI.isZExtFree(Val, RegisterVT)? ISD::ZERO_EXTEND: ISD::ANY_EXTEND;
    790 
    791     getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
    792                    &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
    793     Part += NumParts;
    794   }
    795 
    796   // Copy the parts into the registers.
    797   SmallVector<SDValue, 8> Chains(NumRegs);
    798   for (unsigned i = 0; i != NumRegs; ++i) {
    799     SDValue Part;
    800     if (Flag == 0) {
    801       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    802     } else {
    803       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
    804       *Flag = Part.getValue(1);
    805     }
    806 
    807     Chains[i] = Part.getValue(0);
    808   }
    809 
    810   if (NumRegs == 1 || Flag)
     811     // If NumRegs > 1 && Flag is used, then the use of the last CopyToReg is
     812     // flagged to it. That is, the CopyToReg nodes and the user are considered
    813     // a single scheduling unit. If we create a TokenFactor and return it as
    814     // chain, then the TokenFactor is both a predecessor (operand) of the
    815     // user as well as a successor (the TF operands are flagged to the user).
    816     // c1, f1 = CopyToReg
    817     // c2, f2 = CopyToReg
    818     // c3     = TokenFactor c1, c2
    819     // ...
    820     //        = op c3, ..., f2
    821     Chain = Chains[NumRegs-1];
    822   else
    823     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
    824 }
    825 
    826 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
    827 /// operand list.  This adds the code marker and includes the number of
    828 /// values added into it.
    829 void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
    830                                         unsigned MatchingIdx,
    831                                         SelectionDAG &DAG,
    832                                         std::vector<SDValue> &Ops) const {
    833   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    834 
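           // The flag word packs the operand kind together with the number of
           // register operands that follow it in the inline asm operand list.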
    835   unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
    836   if (HasMatching)
    837     Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
    838   else if (!Regs.empty() &&
    839            TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    840     // Put the register class of the virtual registers in the flag word.  That
    841     // way, later passes can recompute register class constraints for inline
    842     // assembly as well as normal instructions.
    843     // Don't do this for tied operands that can use the regclass information
    844     // from the def.
    845     const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    846     const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    847     Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
    848   }
    849 
    850   SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
    851   Ops.push_back(Res);
    852 
    853   for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    854     unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    855     MVT RegisterVT = RegVTs[Value];
    856     for (unsigned i = 0; i != NumRegs; ++i) {
    857       assert(Reg < Regs.size() && "Mismatch in # registers expected");
    858       Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
    859     }
    860   }
    861 }
    862 
    863 void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
    864                                const TargetLibraryInfo *li) {
    865   AA = &aa;
    866   GFI = gfi;
    867   LibInfo = li;
    868   TD = DAG.getTarget().getDataLayout();
    869   Context = DAG.getContext();
    870   LPadToCallSiteMap.clear();
    871 }
    872 
    873 /// clear - Clear out the current SelectionDAG and the associated
    874 /// state and prepare this SelectionDAGBuilder object to be used
    875 /// for a new block. This doesn't clear out information about
    876 /// additional blocks that are needed to complete switch lowering
    877 /// or PHI node updating; that information is cleared out as it is
    878 /// consumed.
    879 void SelectionDAGBuilder::clear() {
    880   NodeMap.clear();
    881   UnusedArgNodeMap.clear();
    882   PendingLoads.clear();
    883   PendingExports.clear();
    884   CurInst = NULL;
    885   HasTailCall = false;
    886 }
    887 
    888 /// clearDanglingDebugInfo - Clear the dangling debug information
     889 /// map. This function is separated from clear() so that debug
    890 /// information that is dangling in a basic block can be properly
    891 /// resolved in a different basic block. This allows the
    892 /// SelectionDAG to resolve dangling debug information attached
    893 /// to PHI nodes.
    894 void SelectionDAGBuilder::clearDanglingDebugInfo() {
    895   DanglingDebugInfoMap.clear();
    896 }
    897 
    898 /// getRoot - Return the current virtual root of the Selection DAG,
    899 /// flushing any PendingLoad items. This must be done before emitting
    900 /// a store or any other node that may need to be ordered after any
    901 /// prior load instructions.
    902 ///
    903 SDValue SelectionDAGBuilder::getRoot() {
    904   if (PendingLoads.empty())
    905     return DAG.getRoot();
    906 
    907   if (PendingLoads.size() == 1) {
    908     SDValue Root = PendingLoads[0];
    909     DAG.setRoot(Root);
    910     PendingLoads.clear();
    911     return Root;
    912   }
    913 
    914   // Otherwise, we have to make a token factor node.
    915   SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
    916                                &PendingLoads[0], PendingLoads.size());
    917   PendingLoads.clear();
    918   DAG.setRoot(Root);
    919   return Root;
    920 }
    921 
    922 /// getControlRoot - Similar to getRoot, but instead of flushing all the
    923 /// PendingLoad items, flush all the PendingExports items. It is necessary
    924 /// to do this before emitting a terminator instruction.
    925 ///
    926 SDValue SelectionDAGBuilder::getControlRoot() {
    927   SDValue Root = DAG.getRoot();
    928 
    929   if (PendingExports.empty())
    930     return Root;
    931 
    932   // Turn all of the CopyToReg chains into one factored node.
    933   if (Root.getOpcode() != ISD::EntryToken) {
    934     unsigned i = 0, e = PendingExports.size();
    935     for (; i != e; ++i) {
    936       assert(PendingExports[i].getNode()->getNumOperands() > 1);
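               // Operand 0 of each pending export (a CopyToReg) is its input
               // chain, so if it is already Root the dependence is covered.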
    937       if (PendingExports[i].getNode()->getOperand(0) == Root)
    938         break;  // Don't add the root if we already indirectly depend on it.
    939     }
    940 
    941     if (i == e)
    942       PendingExports.push_back(Root);
    943   }
    944 
    945   Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
    946                      &PendingExports[0],
    947                      PendingExports.size());
    948   PendingExports.clear();
    949   DAG.setRoot(Root);
    950   return Root;
    951 }
    952 
    953 void SelectionDAGBuilder::visit(const Instruction &I) {
    954   // Set up outgoing PHI node register values before emitting the terminator.
    955   if (isa<TerminatorInst>(&I))
    956     HandlePHINodesInSuccessorBlocks(I.getParent());
    957 
    958   ++SDNodeOrder;
    959 
    960   CurInst = &I;
    961 
    962   visit(I.getOpcode(), I);
    963 
    964   if (!isa<TerminatorInst>(&I) && !HasTailCall)
    965     CopyToExportRegsIfNeeded(&I);
    966 
    967   CurInst = NULL;
    968 }
    969 
    970 void SelectionDAGBuilder::visitPHI(const PHINode &) {
    971   llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
    972 }
    973 
    974 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
    975   // Note: this doesn't use InstVisitor, because it has to work with
    976   // ConstantExpr's in addition to instructions.
    977   switch (Opcode) {
    978   default: llvm_unreachable("Unknown instruction type encountered!");
    979     // Build the switch statement using the Instruction.def file.
    980 #define HANDLE_INST(NUM, OPCODE, CLASS) \
    981     case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
    982 #include "llvm/IR/Instruction.def"
    983   }
    984 }
    985 
    986 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
    987 // generate the debug data structures now that we've seen its definition.
    988 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
    989                                                    SDValue Val) {
    990   DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
    991   if (DDI.getDI()) {
    992     const DbgValueInst *DI = DDI.getDI();
    993     DebugLoc dl = DDI.getdl();
    994     unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    995     MDNode *Variable = DI->getVariable();
    996     uint64_t Offset = DI->getOffset();
    997     SDDbgValue *SDV;
    998     if (Val.getNode()) {
    999       if (!EmitFuncArgumentDbgValue(V, Variable, Offset, Val)) {
   1000         SDV = DAG.getDbgValue(Variable, Val.getNode(),
   1001                               Val.getResNo(), Offset, dl, DbgSDNodeOrder);
   1002         DAG.AddDbgValue(SDV, Val.getNode(), false);
   1003       }
   1004     } else
   1005       DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
   1006     DanglingDebugInfoMap[V] = DanglingDebugInfo();
   1007   }
   1008 }
   1009 
   1010 /// getValue - Return an SDValue for the given Value.
   1011 SDValue SelectionDAGBuilder::getValue(const Value *V) {
   1012   // If we already have an SDValue for this value, use it. It's important
   1013   // to do this first, so that we don't create a CopyFromReg if we already
   1014   // have a regular SDValue.
   1015   SDValue &N = NodeMap[V];
   1016   if (N.getNode()) return N;
   1017 
   1018   // If there's a virtual register allocated and initialized for this
   1019   // value, use it.
   1020   DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
   1021   if (It != FuncInfo.ValueMap.end()) {
   1022     unsigned InReg = It->second;
   1023     RegsForValue RFV(*DAG.getContext(), *TM.getTargetLowering(),
   1024                      InReg, V->getType());
   1025     SDValue Chain = DAG.getEntryNode();
   1026     N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, NULL, V);
   1027     resolveDanglingDebugInfo(V, N);
   1028     return N;
   1029   }
   1030 
   1031   // Otherwise create a new SDValue and remember it.
   1032   SDValue Val = getValueImpl(V);
   1033   NodeMap[V] = Val;
   1034   resolveDanglingDebugInfo(V, Val);
   1035   return Val;
   1036 }
   1037 
   1038 /// getNonRegisterValue - Return an SDValue for the given Value, but
   1039 /// don't look in FuncInfo.ValueMap for a virtual register.
   1040 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
   1041   // If we already have an SDValue for this value, use it.
   1042   SDValue &N = NodeMap[V];
   1043   if (N.getNode()) return N;
   1044 
   1045   // Otherwise create a new SDValue and remember it.
   1046   SDValue Val = getValueImpl(V);
   1047   NodeMap[V] = Val;
   1048   resolveDanglingDebugInfo(V, Val);
   1049   return Val;
   1050 }
   1051 
   1052 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
   1053 /// Create an SDValue for the given value.
   1054 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
   1055   const TargetLowering *TLI = TM.getTargetLowering();
   1056 
   1057   if (const Constant *C = dyn_cast<Constant>(V)) {
   1058     EVT VT = TLI->getValueType(V->getType(), true);
   1059 
   1060     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
   1061       return DAG.getConstant(*CI, VT);
   1062 
   1063     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
   1064       return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
   1065 
   1066     if (isa<ConstantPointerNull>(C))
   1067       return DAG.getConstant(0, TLI->getPointerTy());
   1068 
   1069     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
   1070       return DAG.getConstantFP(*CFP, VT);
   1071 
   1072     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
   1073       return DAG.getUNDEF(VT);
   1074 
   1075     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
   1076       visit(CE->getOpcode(), *CE);
   1077       SDValue N1 = NodeMap[V];
   1078       assert(N1.getNode() && "visit didn't populate the NodeMap!");
   1079       return N1;
   1080     }
   1081 
   1082     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
   1083       SmallVector<SDValue, 4> Constants;
   1084       for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
   1085            OI != OE; ++OI) {
   1086         SDNode *Val = getValue(*OI).getNode();
   1087         // If the operand is an empty aggregate, there are no values.
   1088         if (!Val) continue;
   1089         // Add each leaf value from the operand to the Constants list
   1090         // to form a flattened list of all the values.
   1091         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
   1092           Constants.push_back(SDValue(Val, i));
   1093       }
   1094 
   1095       return DAG.getMergeValues(&Constants[0], Constants.size(),
   1096                                 getCurSDLoc());
   1097     }
   1098 
   1099     if (const ConstantDataSequential *CDS =
   1100           dyn_cast<ConstantDataSequential>(C)) {
   1101       SmallVector<SDValue, 4> Ops;
   1102       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
   1103         SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
    1104         // Add each leaf value from the element to the Ops list
   1105         // to form a flattened list of all the values.
   1106         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
   1107           Ops.push_back(SDValue(Val, i));
   1108       }
   1109 
   1110       if (isa<ArrayType>(CDS->getType()))
   1111         return DAG.getMergeValues(&Ops[0], Ops.size(), getCurSDLoc());
   1112       return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
   1113                                       VT, &Ops[0], Ops.size());
   1114     }
   1115 
   1116     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
   1117       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
   1118              "Unknown struct or array constant!");
   1119 
   1120       SmallVector<EVT, 4> ValueVTs;
   1121       ComputeValueVTs(*TLI, C->getType(), ValueVTs);
   1122       unsigned NumElts = ValueVTs.size();
   1123       if (NumElts == 0)
   1124         return SDValue(); // empty struct
   1125       SmallVector<SDValue, 4> Constants(NumElts);
   1126       for (unsigned i = 0; i != NumElts; ++i) {
   1127         EVT EltVT = ValueVTs[i];
   1128         if (isa<UndefValue>(C))
   1129           Constants[i] = DAG.getUNDEF(EltVT);
   1130         else if (EltVT.isFloatingPoint())
   1131           Constants[i] = DAG.getConstantFP(0, EltVT);
   1132         else
   1133           Constants[i] = DAG.getConstant(0, EltVT);
   1134       }
   1135 
   1136       return DAG.getMergeValues(&Constants[0], NumElts,
   1137                                 getCurSDLoc());
   1138     }
   1139 
   1140     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
   1141       return DAG.getBlockAddress(BA, VT);
   1142 
   1143     VectorType *VecTy = cast<VectorType>(V->getType());
   1144     unsigned NumElements = VecTy->getNumElements();
   1145 
   1146     // Now that we know the number and type of the elements, get that number of
   1147     // elements into the Ops array based on what kind of constant it is.
   1148     SmallVector<SDValue, 16> Ops;
   1149     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
   1150       for (unsigned i = 0; i != NumElements; ++i)
   1151         Ops.push_back(getValue(CV->getOperand(i)));
   1152     } else {
   1153       assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
   1154       EVT EltVT = TLI->getValueType(VecTy->getElementType());
   1155 
   1156       SDValue Op;
   1157       if (EltVT.isFloatingPoint())
   1158         Op = DAG.getConstantFP(0, EltVT);
   1159       else
   1160         Op = DAG.getConstant(0, EltVT);
   1161       Ops.assign(NumElements, Op);
   1162     }
   1163 
   1164     // Create a BUILD_VECTOR node.
   1165     return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
   1166                                     VT, &Ops[0], Ops.size());
   1167   }
   1168 
   1169   // If this is a static alloca, generate it as the frameindex instead of
    1170   // recomputing its address.
   1171   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
   1172     DenseMap<const AllocaInst*, int>::iterator SI =
   1173       FuncInfo.StaticAllocaMap.find(AI);
   1174     if (SI != FuncInfo.StaticAllocaMap.end())
   1175       return DAG.getFrameIndex(SI->second, TLI->getPointerTy());
   1176   }
   1177 
   1178   // If this is an instruction which fast-isel has deferred, select it now.
   1179   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
   1180     unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
   1181     RegsForValue RFV(*DAG.getContext(), *TLI, InReg, Inst->getType());
   1182     SDValue Chain = DAG.getEntryNode();
   1183     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, NULL, V);
   1184   }
   1185 
   1186   llvm_unreachable("Can't get register for value!");
   1187 }
   1188 
   1189 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
   1190   const TargetLowering *TLI = TM.getTargetLowering();
   1191   SDValue Chain = getControlRoot();
   1192   SmallVector<ISD::OutputArg, 8> Outs;
   1193   SmallVector<SDValue, 8> OutVals;
   1194 
   1195   if (!FuncInfo.CanLowerReturn) {
   1196     unsigned DemoteReg = FuncInfo.DemoteRegister;
   1197     const Function *F = I.getParent()->getParent();
   1198 
   1199     // Emit a store of the return value through the virtual register.
   1200     // Leave Outs empty so that LowerReturn won't try to load return
   1201     // registers the usual way.
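            // This path is taken when the target reports CanLowerReturn as false:
            // the aggregate return value is stored through the hidden sret pointer
            // that FunctionLoweringInfo stashed in DemoteRegister, rather than
            // being returned in registers.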
   1202     SmallVector<EVT, 1> PtrValueVTs;
   1203     ComputeValueVTs(*TLI, PointerType::getUnqual(F->getReturnType()),
   1204                     PtrValueVTs);
   1205 
   1206     SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
   1207     SDValue RetOp = getValue(I.getOperand(0));
   1208 
   1209     SmallVector<EVT, 4> ValueVTs;
   1210     SmallVector<uint64_t, 4> Offsets;
   1211     ComputeValueVTs(*TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
   1212     unsigned NumValues = ValueVTs.size();
   1213 
   1214     SmallVector<SDValue, 4> Chains(NumValues);
   1215     for (unsigned i = 0; i != NumValues; ++i) {
   1216       SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(),
   1217                                 RetPtr.getValueType(), RetPtr,
   1218                                 DAG.getIntPtrConstant(Offsets[i]));
   1219       Chains[i] =
   1220         DAG.getStore(Chain, getCurSDLoc(),
   1221                      SDValue(RetOp.getNode(), RetOp.getResNo() + i),
   1222                      // FIXME: better loc info would be nice.
   1223                      Add, MachinePointerInfo(), false, false, 0);
   1224     }
   1225 
   1226     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
   1227                         MVT::Other, &Chains[0], NumValues);
   1228   } else if (I.getNumOperands() != 0) {
   1229     SmallVector<EVT, 4> ValueVTs;
   1230     ComputeValueVTs(*TLI, I.getOperand(0)->getType(), ValueVTs);
   1231     unsigned NumValues = ValueVTs.size();
   1232     if (NumValues) {
   1233       SDValue RetOp = getValue(I.getOperand(0));
   1234       for (unsigned j = 0, f = NumValues; j != f; ++j) {
   1235         EVT VT = ValueVTs[j];
   1236 
   1237         ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
   1238 
   1239         const Function *F = I.getParent()->getParent();
   1240         if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
   1241                                             Attribute::SExt))
   1242           ExtendKind = ISD::SIGN_EXTEND;
   1243         else if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
   1244                                                  Attribute::ZExt))
   1245           ExtendKind = ISD::ZERO_EXTEND;
   1246 
   1247         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
   1248           VT = TLI->getTypeForExtArgOrReturn(VT.getSimpleVT(), ExtendKind);
   1249 
   1250         unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), VT);
   1251         MVT PartVT = TLI->getRegisterType(*DAG.getContext(), VT);
   1252         SmallVector<SDValue, 4> Parts(NumParts);
   1253         getCopyToParts(DAG, getCurSDLoc(),
   1254                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
   1255                        &Parts[0], NumParts, PartVT, &I, ExtendKind);
   1256 
   1257         // An 'inreg' attribute on the function applies to its return value.
   1258         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
   1259         if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
   1260                                             Attribute::InReg))
   1261           Flags.setInReg();
   1262 
   1263         // Propagate extension type if any
   1264         if (ExtendKind == ISD::SIGN_EXTEND)
   1265           Flags.setSExt();
   1266         else if (ExtendKind == ISD::ZERO_EXTEND)
   1267           Flags.setZExt();
   1268 
   1269         for (unsigned i = 0; i < NumParts; ++i) {
   1270           Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
   1271                                         /*isfixed=*/true, 0, 0));
   1272           OutVals.push_back(Parts[i]);
   1273         }
   1274       }
   1275     }
   1276   }
   1277 
   1278   bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
   1279   CallingConv::ID CallConv =
   1280     DAG.getMachineFunction().getFunction()->getCallingConv();
   1281   Chain = TM.getTargetLowering()->LowerReturn(Chain, CallConv, isVarArg,
   1282                                               Outs, OutVals, getCurSDLoc(),
   1283                                               DAG);
   1284 
   1285   // Verify that the target's LowerReturn behaved as expected.
   1286   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
   1287          "LowerReturn didn't return a valid chain!");
   1288 
   1289   // Update the DAG with the new chain value resulting from return lowering.
   1290   DAG.setRoot(Chain);
   1291 }
   1292 
   1293 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
   1294 /// created for it, emit nodes to copy the value into the virtual
   1295 /// registers.
   1296 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
   1297   // Skip empty types
   1298   if (V->getType()->isEmptyTy())
   1299     return;
   1300 
   1301   DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
   1302   if (VMI != FuncInfo.ValueMap.end()) {
   1303     assert(!V->use_empty() && "Unused value assigned virtual registers!");
   1304     CopyValueToVirtualRegister(V, VMI->second);
   1305   }
   1306 }
   1307 
   1308 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
   1309 /// the current basic block, add it to ValueMap now so that we'll get a
   1310 /// CopyTo/FromReg.
   1311 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
   1312   // No need to export constants.
   1313   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
   1314 
   1315   // Already exported?
   1316   if (FuncInfo.isExportedInst(V)) return;
   1317 
   1318   unsigned Reg = FuncInfo.InitializeRegForValue(V);
   1319   CopyValueToVirtualRegister(V, Reg);
   1320 }
   1321 
   1322 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
   1323                                                      const BasicBlock *FromBB) {
   1324   // The operands of the setcc have to be in this block.  We don't know
   1325   // how to export them from some other block.
   1326   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
   1327     // Can export from current BB.
   1328     if (VI->getParent() == FromBB)
   1329       return true;
   1330 
   1331     // Is already exported, noop.
   1332     return FuncInfo.isExportedInst(V);
   1333   }
   1334 
   1335   // If this is an argument, we can export it if the BB is the entry block or
   1336   // if it is already exported.
   1337   if (isa<Argument>(V)) {
   1338     if (FromBB == &FromBB->getParent()->getEntryBlock())
   1339       return true;
   1340 
   1341     // Otherwise, can only export this if it is already exported.
   1342     return FuncInfo.isExportedInst(V);
   1343   }
   1344 
   1345   // Otherwise, constants can always be exported.
   1346   return true;
   1347 }
   1348 
   1349 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
   1350 uint32_t SelectionDAGBuilder::getEdgeWeight(const MachineBasicBlock *Src,
   1351                                             const MachineBasicBlock *Dst) const {
   1352   BranchProbabilityInfo *BPI = FuncInfo.BPI;
   1353   if (!BPI)
   1354     return 0;
   1355   const BasicBlock *SrcBB = Src->getBasicBlock();
   1356   const BasicBlock *DstBB = Dst->getBasicBlock();
   1357   return BPI->getEdgeWeight(SrcBB, DstBB);
   1358 }
   1359 
   1360 void SelectionDAGBuilder::
   1361 addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst,
   1362                        uint32_t Weight /* = 0 */) {
   1363   if (!Weight)
   1364     Weight = getEdgeWeight(Src, Dst);
   1365   Src->addSuccessor(Dst, Weight);
   1366 }
   1367 
   1368 
   1369 static bool InBlock(const Value *V, const BasicBlock *BB) {
   1370   if (const Instruction *I = dyn_cast<Instruction>(V))
   1371     return I->getParent() == BB;
   1372   return true;
   1373 }
   1374 
   1375 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
   1376 /// This function emits a branch and is used at the leaves of an OR or an
   1377 /// AND operator tree.
   1378 ///
   1379 void
   1380 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
   1381                                                   MachineBasicBlock *TBB,
   1382                                                   MachineBasicBlock *FBB,
   1383                                                   MachineBasicBlock *CurBB,
   1384                                                   MachineBasicBlock *SwitchBB) {
   1385   const BasicBlock *BB = CurBB->getBasicBlock();
   1386 
   1387   // If the leaf of the tree is a comparison, merge the condition into
   1388   // the caseblock.
   1389   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
   1390     // The operands of the cmp have to be in this block.  We don't know
   1391     // how to export them from some other block.  If this is the first block
   1392     // of the sequence, no exporting is needed.
   1393     if (CurBB == SwitchBB ||
   1394         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
   1395          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
   1396       ISD::CondCode Condition;
   1397       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
   1398         Condition = getICmpCondCode(IC->getPredicate());
   1399       } else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
   1400         Condition = getFCmpCondCode(FC->getPredicate());
   1401         if (TM.Options.NoNaNsFPMath)
   1402           Condition = getFCmpCodeWithoutNaN(Condition);
   1403       } else {
   1404         Condition = ISD::SETEQ; // silence warning.
   1405         llvm_unreachable("Unknown compare instruction");
   1406       }
   1407 
   1408       CaseBlock CB(Condition, BOp->getOperand(0),
   1409                    BOp->getOperand(1), NULL, TBB, FBB, CurBB);
   1410       SwitchCases.push_back(CB);
   1411       return;
   1412     }
   1413   }
   1414 
   1415   // Create a CaseBlock record representing this branch.
   1416   CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
   1417                NULL, TBB, FBB, CurBB);
   1418   SwitchCases.push_back(CB);
   1419 }
   1420 
   1421 /// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
        /// recursively process the subexpressions, creating any intermediate blocks
        /// needed to lower the whole tree as a sequence of conditional branches.
   1422 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
   1423                                                MachineBasicBlock *TBB,
   1424                                                MachineBasicBlock *FBB,
   1425                                                MachineBasicBlock *CurBB,
   1426                                                MachineBasicBlock *SwitchBB,
   1427                                                unsigned Opc) {
   1428   // If this node is not part of the or/and tree, emit it as a branch.
   1429   const Instruction *BOp = dyn_cast<Instruction>(Cond);
   1430   if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
   1431       (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
   1432       BOp->getParent() != CurBB->getBasicBlock() ||
   1433       !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
   1434       !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
   1435     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB);
   1436     return;
   1437   }
   1438 
   1439   //  Create TmpBB after CurBB.
   1440   MachineFunction::iterator BBI = CurBB;
   1441   MachineFunction &MF = DAG.getMachineFunction();
   1442   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
   1443   CurBB->getParent()->insert(++BBI, TmpBB);
   1444 
   1445   if (Opc == Instruction::Or) {
   1446     // Codegen X | Y as:
   1447     //   jmp_if_X TBB
   1448     //   jmp TmpBB
   1449     // TmpBB:
   1450     //   jmp_if_Y TBB
   1451     //   jmp FBB
   1452     //
   1453 
   1454     // Emit the LHS condition.
   1455     FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc);
   1456 
   1457     // Emit the RHS condition into TmpBB.
   1458     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc);
   1459   } else {
   1460     assert(Opc == Instruction::And && "Unknown merge op!");
   1461     // Codegen X & Y as:
   1462     //   jmp_if_X TmpBB
   1463     //   jmp FBB
   1464     // TmpBB:
   1465     //   jmp_if_Y TBB
   1466     //   jmp FBB
   1467     //
   1468     //  This requires creation of TmpBB after CurBB.
   1469 
   1470     // Emit the LHS condition.
   1471     FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc);
   1472 
   1473     // Emit the RHS condition into TmpBB.
   1474     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc);
   1475   }
   1476 }
   1477 
   1478 /// If the set of cases should be emitted as a series of branches, return true.
   1479 /// If we should emit this as a bunch of and/or'd together conditions, return
   1480 /// false.
   1481 bool
   1482 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
   1483   if (Cases.size() != 2) return true;
   1484 
   1485   // If this is two comparisons of the same values or'd or and'd together, they
   1486   // will get folded into a single comparison, so don't emit two blocks.
   1487   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
   1488        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
   1489       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
   1490        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
   1491     return false;
   1492   }
   1493 
   1494   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
   1495   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
   1496   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
   1497       Cases[0].CC == Cases[1].CC &&
   1498       isa<Constant>(Cases[0].CmpRHS) &&
   1499       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
   1500     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
   1501       return false;
   1502     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
   1503       return false;
   1504   }
   1505 
   1506   return true;
   1507 }
   1508 
   1509 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
   1510   MachineBasicBlock *BrMBB = FuncInfo.MBB;
   1511 
   1512   // Update machine-CFG edges.
   1513   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
   1514 
   1515   // Figure out which block is immediately after the current one.
   1516   MachineBasicBlock *NextBlock = 0;
   1517   MachineFunction::iterator BBI = BrMBB;
   1518   if (++BBI != FuncInfo.MF->end())
   1519     NextBlock = BBI;
   1520 
   1521   if (I.isUnconditional()) {
   1522     // Update machine-CFG edges.
   1523     BrMBB->addSuccessor(Succ0MBB);
   1524 
   1525     // If this is not a fall-through branch, emit the branch.
   1526     if (Succ0MBB != NextBlock)
   1527       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
   1528                               MVT::Other, getControlRoot(),
   1529                               DAG.getBasicBlock(Succ0MBB)));
   1530 
   1531     return;
   1532   }
   1533 
   1534   // If this condition is one of the special cases we handle, emit the
   1535   // optimized branch sequence for it now.
   1536   const Value *CondVal = I.getCondition();
   1537   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
   1538 
   1539   // If this is a series of conditions that are or'd or and'd together, emit
   1540   // this as a sequence of branches instead of setcc's with and/or operations.
   1541   // As long as jumps are not expensive, this should improve performance.
   1542   // For example, instead of something like:
   1543   //     cmp A, B
   1544   //     C = seteq
   1545   //     cmp D, E
   1546   //     F = setle
   1547   //     or C, F
   1548   //     jnz foo
   1549   // Emit:
   1550   //     cmp A, B
   1551   //     je foo
   1552   //     cmp D, E
   1553   //     jle foo
   1554   //
   1555   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
   1556     if (!TM.getTargetLowering()->isJumpExpensive() &&
   1557         BOp->hasOneUse() &&
   1558         (BOp->getOpcode() == Instruction::And ||
   1559          BOp->getOpcode() == Instruction::Or)) {
   1560       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
   1561                            BOp->getOpcode());
   1562       // If the compares in later blocks need to use values not currently
   1563       // exported from this block, export them now.  This block should always
   1564       // be the first entry.
   1565       assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
   1566 
   1567       // Allow some cases to be rejected.
   1568       if (ShouldEmitAsBranches(SwitchCases)) {
   1569         for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
   1570           ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
   1571           ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
   1572         }
   1573 
   1574         // Emit the branch for this block.
   1575         visitSwitchCase(SwitchCases[0], BrMBB);
   1576         SwitchCases.erase(SwitchCases.begin());
   1577         return;
   1578       }
   1579 
   1580       // Okay, we decided not to do this, remove any inserted MBB's and clear
   1581       // SwitchCases.
   1582       for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
   1583         FuncInfo.MF->erase(SwitchCases[i].ThisBB);
   1584 
   1585       SwitchCases.clear();
   1586     }
   1587   }
   1588 
   1589   // Create a CaseBlock record representing this branch.
   1590   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
   1591                NULL, Succ0MBB, Succ1MBB, BrMBB);
   1592 
   1593   // Use visitSwitchCase to actually insert the fast branch sequence for this
   1594   // cond branch.
   1595   visitSwitchCase(CB, BrMBB);
   1596 }
   1597 
   1598 /// visitSwitchCase - Emits the necessary code to represent a single node in
   1599 /// the binary search tree resulting from lowering a switch instruction.
   1600 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
   1601                                           MachineBasicBlock *SwitchBB) {
   1602   SDValue Cond;
   1603   SDValue CondLHS = getValue(CB.CmpLHS);
   1604   SDLoc dl = getCurSDLoc();
   1605 
   1606   // Build the setcc now.
   1607   if (CB.CmpMHS == NULL) {
   1608     // Fold "(X == true)" to X and "(X == false)" to !X to
   1609     // handle common cases produced by branch lowering.
   1610     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
   1611         CB.CC == ISD::SETEQ)
   1612       Cond = CondLHS;
   1613     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
   1614              CB.CC == ISD::SETEQ) {
   1615       SDValue True = DAG.getConstant(1, CondLHS.getValueType());
   1616       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
   1617     } else
   1618       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
   1619   } else {
   1620     assert(CB.CC == ISD::SETCC_INVALID &&
   1621            "Condition is undefined for a range membership check.");
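            // Membership in [Low, High] is checked with a single unsigned
            // comparison: (X - Low) u<= (High - Low); the subtraction is omitted
            // below when Low is already zero.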
   1622 
   1623     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
   1624     const APInt& High  = cast<ConstantInt>(CB.CmpRHS)->getValue();
   1625 
   1626     SDValue CmpOp = getValue(CB.CmpMHS);
   1627     EVT VT = CmpOp.getValueType();
   1628 
   1629     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(false)) {
   1630       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
   1631                           ISD::SETULE);
   1632     } else {
   1633       SDValue SUB = DAG.getNode(ISD::SUB, dl,
   1634                                 VT, CmpOp, DAG.getConstant(Low, VT));
   1635       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
   1636                           DAG.getConstant(High-Low, VT), ISD::SETULE);
   1637     }
   1638   }
   1639 
   1640   // Update successor info
   1641   addSuccessorWithWeight(SwitchBB, CB.TrueBB, CB.TrueWeight);
   1642   // TrueBB and FalseBB are always different unless the incoming IR is
   1643   // degenerate. This only happens when running llc on weird IR.
   1644   if (CB.TrueBB != CB.FalseBB)
   1645     addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight);
   1646 
   1647   // Set NextBlock to be the MBB immediately after the current one, if any.
   1648   // This is used to avoid emitting unnecessary branches to the next block.
   1649   MachineBasicBlock *NextBlock = 0;
   1650   MachineFunction::iterator BBI = SwitchBB;
   1651   if (++BBI != FuncInfo.MF->end())
   1652     NextBlock = BBI;
   1653 
   1654   // If the lhs block is the next block, invert the condition so that we can
   1655   // fall through to the lhs instead of the rhs block.
   1656   if (CB.TrueBB == NextBlock) {
   1657     std::swap(CB.TrueBB, CB.FalseBB);
   1658     SDValue True = DAG.getConstant(1, Cond.getValueType());
   1659     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
   1660   }
   1661 
   1662   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
   1663                                MVT::Other, getControlRoot(), Cond,
   1664                                DAG.getBasicBlock(CB.TrueBB));
   1665 
   1666   // Insert the false branch. Do this even if it's a fall through branch,
   1667   // this makes it easier to do DAG optimizations which require inverting
   1668   // the branch condition.
   1669   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
   1670                        DAG.getBasicBlock(CB.FalseBB));
   1671 
   1672   DAG.setRoot(BrCond);
   1673 }
   1674 
   1675 /// visitJumpTable - Emit JumpTable node in the current MBB
   1676 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
   1677   // Emit the code for the jump table
   1678   assert(JT.Reg != -1U && "Should lower JT Header first!");
   1679   EVT PTy = TM.getTargetLowering()->getPointerTy();
   1680   SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
   1681                                      JT.Reg, PTy);
   1682   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
   1683   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
   1684                                     MVT::Other, Index.getValue(1),
   1685                                     Table, Index);
   1686   DAG.setRoot(BrJumpTable);
   1687 }
   1688 
   1689 /// visitJumpTableHeader - This function emits the code necessary to produce an
   1690 /// index into the JumpTable from the switch operand.
   1691 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
   1692                                                JumpTableHeader &JTH,
   1693                                                MachineBasicBlock *SwitchBB) {
   1694   // Subtract the lowest switch case value from the value being switched on and
   1695   // conditionally branch to the default MBB if the result is greater than the
   1696   // difference between smallest and largest cases.
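          // For example (values illustrative only), a switch whose cases span
          // 10..14 computes Sub = SwitchOp - 10 here and branches to the default
          // block when Sub > 4; otherwise Sub becomes the jump table index.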
   1697   SDValue SwitchOp = getValue(JTH.SValue);
   1698   EVT VT = SwitchOp.getValueType();
   1699   SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, SwitchOp,
   1700                             DAG.getConstant(JTH.First, VT));
   1701 
   1702   // The SDNode we just created, which holds the value being switched on minus
   1703   // the smallest case value, needs to be copied to a virtual register so it
   1704   // can be used as an index into the jump table in a subsequent basic block.
   1705   // This value may be smaller or larger than the target's pointer type, and
   1706   // therefore may require extension or truncation.
   1707   const TargetLowering *TLI = TM.getTargetLowering();
   1708   SwitchOp = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), TLI->getPointerTy());
   1709 
   1710   unsigned JumpTableReg = FuncInfo.CreateReg(TLI->getPointerTy());
   1711   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurSDLoc(),
   1712                                     JumpTableReg, SwitchOp);
   1713   JT.Reg = JumpTableReg;
   1714 
   1715   // Emit the range check for the jump table, and branch to the default block
   1716   // for the switch statement if the value being switched on exceeds the largest
   1717   // case in the switch.
   1718   SDValue CMP = DAG.getSetCC(getCurSDLoc(),
   1719                              TLI->getSetCCResultType(*DAG.getContext(),
   1720                                                      Sub.getValueType()),
   1721                              Sub,
   1722                              DAG.getConstant(JTH.Last - JTH.First,VT),
   1723                              ISD::SETUGT);
   1724 
   1725   // Set NextBlock to be the MBB immediately after the current one, if any.
   1726   // This is used to avoid emitting unnecessary branches to the next block.
   1727   MachineBasicBlock *NextBlock = 0;
   1728   MachineFunction::iterator BBI = SwitchBB;
   1729 
   1730   if (++BBI != FuncInfo.MF->end())
   1731     NextBlock = BBI;
   1732 
   1733   SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
   1734                                MVT::Other, CopyTo, CMP,
   1735                                DAG.getBasicBlock(JT.Default));
   1736 
   1737   if (JT.MBB != NextBlock)
   1738     BrCond = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, BrCond,
   1739                          DAG.getBasicBlock(JT.MBB));
   1740 
   1741   DAG.setRoot(BrCond);
   1742 }
   1743 
   1744 /// visitBitTestHeader - This function emits the code necessary to produce a
   1745 /// value suitable for "bit tests".
   1746 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
   1747                                              MachineBasicBlock *SwitchBB) {
   1748   // Subtract the minimum value
   1749   SDValue SwitchOp = getValue(B.SValue);
   1750   EVT VT = SwitchOp.getValueType();
   1751   SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, SwitchOp,
   1752                             DAG.getConstant(B.First, VT));
   1753 
   1754   // Check range
   1755   const TargetLowering *TLI = TM.getTargetLowering();
   1756   SDValue RangeCmp = DAG.getSetCC(getCurSDLoc(),
   1757                                   TLI->getSetCCResultType(*DAG.getContext(),
   1758                                                          Sub.getValueType()),
   1759                                   Sub, DAG.getConstant(B.Range, VT),
   1760                                   ISD::SETUGT);
   1761 
   1762   // Determine the type of the test operands.
   1763   bool UsePtrType = false;
   1764   if (!TLI->isTypeLegal(VT))
   1765     UsePtrType = true;
   1766   else {
   1767     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
   1768       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
   1769         // Switch table case ranges are encoded into a series of masks.
   1770         // Just use the pointer type; it is guaranteed to fit.
   1771         UsePtrType = true;
   1772         break;
   1773       }
   1774   }
   1775   if (UsePtrType) {
   1776     VT = TLI->getPointerTy();
   1777     Sub = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), VT);
   1778   }
   1779 
   1780   B.RegVT = VT.getSimpleVT();
   1781   B.Reg = FuncInfo.CreateReg(B.RegVT);
   1782   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurSDLoc(),
   1783                                     B.Reg, Sub);
   1784 
   1785   // Set NextBlock to be the MBB immediately after the current one, if any.
   1786   // This is used to avoid emitting unnecessary branches to the next block.
   1787   MachineBasicBlock *NextBlock = 0;
   1788   MachineFunction::iterator BBI = SwitchBB;
   1789   if (++BBI != FuncInfo.MF->end())
   1790     NextBlock = BBI;
   1791 
   1792   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
   1793 
   1794   addSuccessorWithWeight(SwitchBB, B.Default);
   1795   addSuccessorWithWeight(SwitchBB, MBB);
   1796 
   1797   SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
   1798                                 MVT::Other, CopyTo, RangeCmp,
   1799                                 DAG.getBasicBlock(B.Default));
   1800 
   1801   if (MBB != NextBlock)
   1802     BrRange = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, BrRange,
   1803                           DAG.getBasicBlock(MBB));
   1804 
   1805   DAG.setRoot(BrRange);
   1806 }
   1807 
   1808 /// visitBitTestCase - This function produces one "bit test".
   1809 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
   1810                                            MachineBasicBlock* NextMBB,
   1811                                            uint32_t BranchWeightToNext,
   1812                                            unsigned Reg,
   1813                                            BitTestCase &B,
   1814                                            MachineBasicBlock *SwitchBB) {
   1815   MVT VT = BB.RegVT;
   1816   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
   1817                                        Reg, VT);
   1818   SDValue Cmp;
   1819   unsigned PopCount = CountPopulation_64(B.Mask);
   1820   const TargetLowering *TLI = TM.getTargetLowering();
   1821   if (PopCount == 1) {
   1822     // Testing for a single bit; just compare the shift count with what it
   1823     // would need to be to shift a 1 bit in that position.
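            // For example, a mask of 0x8 (only bit 3 set) becomes the single
            // comparison "ShiftOp == 3".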
   1824     Cmp = DAG.getSetCC(getCurSDLoc(),
   1825                        TLI->getSetCCResultType(*DAG.getContext(), VT),
   1826                        ShiftOp,
   1827                        DAG.getConstant(countTrailingZeros(B.Mask), VT),
   1828                        ISD::SETEQ);
   1829   } else if (PopCount == BB.Range) {
   1830     // There is only one zero bit in the range, test for it directly.
   1831     Cmp = DAG.getSetCC(getCurSDLoc(),
   1832                        TLI->getSetCCResultType(*DAG.getContext(), VT),
   1833                        ShiftOp,
   1834                        DAG.getConstant(CountTrailingOnes_64(B.Mask), VT),
   1835                        ISD::SETNE);
   1836   } else {
   1837     // Make desired shift
   1838     SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurSDLoc(), VT,
   1839                                     DAG.getConstant(1, VT), ShiftOp);
   1840 
   1841     // Emit bit tests and jumps
   1842     SDValue AndOp = DAG.getNode(ISD::AND, getCurSDLoc(),
   1843                                 VT, SwitchVal, DAG.getConstant(B.Mask, VT));
   1844     Cmp = DAG.getSetCC(getCurSDLoc(),
   1845                        TLI->getSetCCResultType(*DAG.getContext(), VT),
   1846                        AndOp, DAG.getConstant(0, VT),
   1847                        ISD::SETNE);
   1848   }
   1849 
   1850   // The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight.
   1851   addSuccessorWithWeight(SwitchBB, B.TargetBB, B.ExtraWeight);
   1852   // The branch weight from SwitchBB to NextMBB is BranchWeightToNext.
   1853   addSuccessorWithWeight(SwitchBB, NextMBB, BranchWeightToNext);
   1854 
   1855   SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
   1856                               MVT::Other, getControlRoot(),
   1857                               Cmp, DAG.getBasicBlock(B.TargetBB));
   1858 
   1859   // Set NextBlock to be the MBB immediately after the current one, if any.
   1860   // This is used to avoid emitting unnecessary branches to the next block.
   1861   MachineBasicBlock *NextBlock = 0;
   1862   MachineFunction::iterator BBI = SwitchBB;
   1863   if (++BBI != FuncInfo.MF->end())
   1864     NextBlock = BBI;
   1865 
   1866   if (NextMBB != NextBlock)
   1867     BrAnd = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, BrAnd,
   1868                         DAG.getBasicBlock(NextMBB));
   1869 
   1870   DAG.setRoot(BrAnd);
   1871 }
   1872 
   1873 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
   1874   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
   1875 
   1876   // Retrieve successors.
   1877   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
   1878   MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
   1879 
   1880   const Value *Callee(I.getCalledValue());
   1881   const Function *Fn = dyn_cast<Function>(Callee);
   1882   if (isa<InlineAsm>(Callee))
   1883     visitInlineAsm(&I);
   1884   else if (Fn && Fn->isIntrinsic()) {
   1885     assert(Fn->getIntrinsicID() == Intrinsic::donothing);
   1886     // Ignore invokes to @llvm.donothing: jump directly to the next BB.
   1887   } else
   1888     LowerCallTo(&I, getValue(Callee), false, LandingPad);
   1889 
   1890   // If the value of the invoke is used outside of its defining block, make it
   1891   // available as a virtual register.
   1892   CopyToExportRegsIfNeeded(&I);
   1893 
   1894   // Update successor info
   1895   addSuccessorWithWeight(InvokeMBB, Return);
   1896   addSuccessorWithWeight(InvokeMBB, LandingPad);
   1897 
   1898   // Drop into normal successor.
   1899   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
   1900                           MVT::Other, getControlRoot(),
   1901                           DAG.getBasicBlock(Return)));
   1902 }
   1903 
   1904 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
   1905   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
   1906 }
   1907 
   1908 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
   1909   assert(FuncInfo.MBB->isLandingPad() &&
   1910          "Call to landingpad not in landing pad!");
   1911 
   1912   MachineBasicBlock *MBB = FuncInfo.MBB;
   1913   MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
   1914   AddLandingPadInfo(LP, MMI, MBB);
   1915 
   1916   // If there aren't registers to copy the values into (e.g., during SjLj
   1917   // exceptions), then don't bother to create these DAG nodes.
   1918   const TargetLowering *TLI = TM.getTargetLowering();
   1919   if (TLI->getExceptionPointerRegister() == 0 &&
   1920       TLI->getExceptionSelectorRegister() == 0)
   1921     return;
   1922 
   1923   SmallVector<EVT, 2> ValueVTs;
   1924   ComputeValueVTs(*TLI, LP.getType(), ValueVTs);
   1925   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
   1926 
   1927   // Get the two live-in registers as SDValues. The physregs have already been
   1928   // copied into virtual registers.
   1929   SDValue Ops[2];
   1930   Ops[0] = DAG.getZExtOrTrunc(
   1931     DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
   1932                        FuncInfo.ExceptionPointerVirtReg, TLI->getPointerTy()),
   1933     getCurSDLoc(), ValueVTs[0]);
   1934   Ops[1] = DAG.getZExtOrTrunc(
   1935     DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
   1936                        FuncInfo.ExceptionSelectorVirtReg, TLI->getPointerTy()),
   1937     getCurSDLoc(), ValueVTs[1]);
   1938 
   1939   // Merge into one.
   1940   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
   1941                             DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
   1942                             &Ops[0], 2);
   1943   setValue(&LP, Res);
   1944 }
   1945 
   1946 /// handleSmallSwitchRange - Emit a series of specific tests (suitable for
   1947 /// small case ranges).
   1948 bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
   1949                                                  CaseRecVector& WorkList,
   1950                                                  const Value* SV,
   1951                                                  MachineBasicBlock *Default,
   1952                                                  MachineBasicBlock *SwitchBB) {
   1953   // Size is the number of Cases represented by this range.
   1954   size_t Size = CR.Range.second - CR.Range.first;
   1955   if (Size > 3)
   1956     return false;
   1957 
   1958   // Get the MachineFunction which holds the current MBB.  This is used when
   1959   // inserting any additional MBBs necessary to represent the switch.
   1960   MachineFunction *CurMF = FuncInfo.MF;
   1961 
   1962   // Figure out which block is immediately after the current one.
   1963   MachineBasicBlock *NextBlock = 0;
   1964   MachineFunction::iterator BBI = CR.CaseBB;
   1965 
   1966   if (++BBI != FuncInfo.MF->end())
   1967     NextBlock = BBI;
   1968 
   1969   BranchProbabilityInfo *BPI = FuncInfo.BPI;
   1970   // If any two of the cases have the same destination, and if one value
   1971   // is the same as the other, but has one bit unset that the other has set,
   1972   // use bit manipulation to do two compares at once.  For example:
   1973   // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
   1974   // TODO: This could be extended to merge any 2 cases in switches with 3 cases.
   1975   // TODO: Handle cases where CR.CaseBB != SwitchBB.
   1976   if (Size == 2 && CR.CaseBB == SwitchBB) {
   1977     Case &Small = *CR.Range.first;
   1978     Case &Big = *(CR.Range.second-1);
   1979 
   1980     if (Small.Low == Small.High && Big.Low == Big.High && Small.BB == Big.BB) {
   1981       const APInt& SmallValue = cast<ConstantInt>(Small.Low)->getValue();
   1982       const APInt& BigValue = cast<ConstantInt>(Big.Low)->getValue();
   1983 
   1984       // Check that there is only one bit different.
   1985       if (BigValue.countPopulation() == SmallValue.countPopulation() + 1 &&
   1986           (SmallValue | BigValue) == BigValue) {
   1987         // Isolate the common bit.
   1988         APInt CommonBit = BigValue & ~SmallValue;
   1989         assert((SmallValue | CommonBit) == BigValue &&
   1990                CommonBit.countPopulation() == 1 && "Not a common bit?");
   1991 
   1992         SDValue CondLHS = getValue(SV);
   1993         EVT VT = CondLHS.getValueType();
   1994         SDLoc DL = getCurSDLoc();
   1995 
   1996         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
   1997                                  DAG.getConstant(CommonBit, VT));
   1998         SDValue Cond = DAG.getSetCC(DL, MVT::i1,
   1999                                     Or, DAG.getConstant(BigValue, VT),
   2000                                     ISD::SETEQ);
   2001 
   2002         // Update successor info.
   2003         // Both Small and Big will jump to Small.BB, so we sum up the weights.
   2004         addSuccessorWithWeight(SwitchBB, Small.BB,
   2005                                Small.ExtraWeight + Big.ExtraWeight);
   2006         addSuccessorWithWeight(SwitchBB, Default,
   2007           // The default destination is the first successor in IR.
   2008           BPI ? BPI->getEdgeWeight(SwitchBB->getBasicBlock(), (unsigned)0) : 0);
   2009 
   2010         // Insert the true branch.
   2011         SDValue BrCond = DAG.getNode(ISD::BRCOND, DL, MVT::Other,
   2012                                      getControlRoot(), Cond,
   2013                                      DAG.getBasicBlock(Small.BB));
   2014 
   2015         // Insert the false branch.
   2016         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
   2017                              DAG.getBasicBlock(Default));
   2018 
   2019         DAG.setRoot(BrCond);
   2020         return true;
   2021       }
   2022     }
   2023   }
   2024 
   2025   // Order cases by weight so the most likely case will be checked first.
   2026   uint32_t UnhandledWeights = 0;
   2027   if (BPI) {
   2028     for (CaseItr I = CR.Range.first, IE = CR.Range.second; I != IE; ++I) {
   2029       uint32_t IWeight = I->ExtraWeight;
   2030       UnhandledWeights += IWeight;
   2031       for (CaseItr J = CR.Range.first; J < I; ++J) {
   2032         uint32_t JWeight = J->ExtraWeight;
   2033         if (IWeight > JWeight)
   2034           std::swap(*I, *J);
   2035       }
   2036     }
   2037   }
   2038   // Rearrange the case blocks so that the last one falls through if possible.
   2039   Case &BackCase = *(CR.Range.second-1);
   2040   if (Size > 1 &&
   2041       NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
   2042     // The last case block won't fall through into 'NextBlock' if we emit the
   2043     // branches in this order.  See if rearranging a case value would help.
   2044     // We start at the bottom as it's the case with the least weight.
   2045     for (Case *I = &*(CR.Range.second-2), *E = &*CR.Range.first-1; I != E; --I)
   2046       if (I->BB == NextBlock) {
   2047         std::swap(*I, BackCase);
   2048         break;
   2049       }
   2050   }
   2051 
   2052   // Create a CaseBlock record representing a conditional branch to
   2053   // the Case's target mbb if the value being switched on SV is equal
   2054   // to C.
   2055   MachineBasicBlock *CurBlock = CR.CaseBB;
   2056   for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
   2057     MachineBasicBlock *FallThrough;
   2058     if (I != E-1) {
   2059       FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
   2060       CurMF->insert(BBI, FallThrough);
   2061 
   2062       // Put SV in a virtual register to make it available from the new blocks.
   2063       ExportFromCurrentBlock(SV);
   2064     } else {
   2065       // If the last case doesn't match, go to the default block.
   2066       FallThrough = Default;
   2067     }
   2068 
   2069     const Value *RHS, *LHS, *MHS;
   2070     ISD::CondCode CC;
   2071     if (I->High == I->Low) {
   2072       // This is just a small case range containing exactly one case.
   2073       CC = ISD::SETEQ;
   2074       LHS = SV; RHS = I->High; MHS = NULL;
   2075     } else {
   2076       CC = ISD::SETCC_INVALID;
   2077       LHS = I->Low; MHS = SV; RHS = I->High;
   2078     }
   2079 
   2080     // The false weight should be sum of all un-handled cases.
   2081     UnhandledWeights -= I->ExtraWeight;
   2082     CaseBlock CB(CC, LHS, RHS, MHS, /* truebb */ I->BB, /* falsebb */ FallThrough,
   2083                  /* me */ CurBlock,
   2084                  /* trueweight */ I->ExtraWeight,
   2085                  /* falseweight */ UnhandledWeights);
   2086 
   2087     // If emitting the first comparison, just call visitSwitchCase to emit the
   2088     // code into the current block.  Otherwise, push the CaseBlock onto the
   2089     // vector to be later processed by SDISel, and insert the node's MBB
   2090     // before the next MBB.
   2091     if (CurBlock == SwitchBB)
   2092       visitSwitchCase(CB, SwitchBB);
   2093     else
   2094       SwitchCases.push_back(CB);
   2095 
   2096     CurBlock = FallThrough;
   2097   }
   2098 
   2099   return true;
   2100 }
   2101 
   2102 static inline bool areJTsAllowed(const TargetLowering &TLI) {
   2103   return TLI.supportJumpTables() &&
   2104           (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
   2105            TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
   2106 }
   2107 
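        // ComputeRange - Return the number of values in [First, Last], computed in
        // a width wide enough that the count cannot wrap. For example
        // (illustrative), First = 0 and Last = 255 at 8 bits yields 256,
        // represented in 9 bits.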
   2108 static APInt ComputeRange(const APInt &First, const APInt &Last) {
   2109   uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
   2110   APInt LastExt = Last.zext(BitWidth), FirstExt = First.zext(BitWidth);
   2111   return (LastExt - FirstExt + 1ULL);
   2112 }
   2113 
   2114 /// handleJTSwitchCase - Emit a jump table for the current switch case range.
   2115 bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
   2116                                              CaseRecVector &WorkList,
   2117                                              const Value *SV,
   2118                                              MachineBasicBlock *Default,
   2119                                              MachineBasicBlock *SwitchBB) {
   2120   Case& FrontCase = *CR.Range.first;
   2121   Case& BackCase  = *(CR.Range.second-1);
   2122 
   2123   const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
   2124   const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
   2125 
   2126   APInt TSize(First.getBitWidth(), 0);
   2127   for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I)
   2128     TSize += I->size();
   2129 
   2130   const TargetLowering *TLI = TM.getTargetLowering();
   2131   if (!areJTsAllowed(*TLI) || TSize.ult(TLI->getMinimumJumpTableEntries()))
   2132     return false;
   2133 
   2134   APInt Range = ComputeRange(First, Last);
   2135   // The density is TSize / Range. Require at least 40%.
   2136   // It should not be possible for IntTSize to saturate for sane code, but make
   2137   // sure we handle Range saturation correctly.
   2138   uint64_t IntRange = Range.getLimitedValue(UINT64_MAX/10);
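          // For example (numbers illustrative only), 5 cases covering a span of 10
          // values give a density of 50% and a jump table is emitted, while 5 cases
          // spread over a span of 20 values give 25% and this routine returns false.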
   2139   uint64_t IntTSize = TSize.getLimitedValue(UINT64_MAX/10);
   2140   if (IntTSize * 10 < IntRange * 4)
   2141     return false;
   2142 
   2143   DEBUG(dbgs() << "Lowering jump table\n"
   2144                << "First entry: " << First << ". Last entry: " << Last << '\n'
   2145                << "Range: " << Range << ". Size: " << TSize << ".\n\n");
   2146 
   2147   // Get the MachineFunction which holds the current MBB.  This is used when
   2148   // inserting any additional MBBs necessary to represent the switch.
   2149   MachineFunction *CurMF = FuncInfo.MF;
   2150 
   2151   // Figure out which block is immediately after the current one.
   2152   MachineFunction::iterator BBI = CR.CaseBB;
   2153   ++BBI;
   2154 
   2155   const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
   2156 
   2157   // Create a new basic block to hold the code for loading the address
   2158   // of the jump table, and jumping to it.  Update successor information;
   2159   // we will either branch to the default case for the switch, or the jump
   2160   // table.
   2161   MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
   2162   CurMF->insert(BBI, JumpTableBB);
   2163 
   2164   addSuccessorWithWeight(CR.CaseBB, Default);
   2165   addSuccessorWithWeight(CR.CaseBB, JumpTableBB);
   2166 
   2167   // Build a vector of destination BBs, corresponding to each target
   2168   // of the jump table. If the value of the jump table slot corresponds to
   2169   // a case statement, push the case's BB onto the vector, otherwise, push
   2170   // the default BB.
   2171   std::vector<MachineBasicBlock*> DestBBs;
   2172   APInt TEI = First;
   2173   for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
   2174     const APInt &Low = cast<ConstantInt>(I->Low)->getValue();
   2175     const APInt &High = cast<ConstantInt>(I->High)->getValue();
   2176 
   2177     if (Low.ule(TEI) && TEI.ule(High)) {
   2178       DestBBs.push_back(I->BB);
   2179       if (TEI==High)
   2180         ++I;
   2181     } else {
   2182       DestBBs.push_back(Default);
   2183     }
   2184   }
   2185 
   2186   // Calculate weight for each unique destination in CR.
   2187   DenseMap<MachineBasicBlock*, uint32_t> DestWeights;
   2188   if (FuncInfo.BPI)
   2189     for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
   2190       DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
   2191           DestWeights.find(I->BB);
   2192       if (Itr != DestWeights.end())
   2193         Itr->second += I->ExtraWeight;
   2194       else
   2195         DestWeights[I->BB] = I->ExtraWeight;
   2196     }
   2197 
   2198   // Update successor info. Add one edge to each unique successor.
   2199   BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
   2200   for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
   2201          E = DestBBs.end(); I != E; ++I) {
   2202     if (!SuccsHandled[(*I)->getNumber()]) {
   2203       SuccsHandled[(*I)->getNumber()] = true;
   2204       DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
   2205           DestWeights.find(*I);
   2206       addSuccessorWithWeight(JumpTableBB, *I,
   2207                              Itr != DestWeights.end() ? Itr->second : 0);
   2208     }
   2209   }
   2210 
   2211   // Create a jump table index for this jump table.
   2212   unsigned JTEncoding = TLI->getJumpTableEncoding();
   2213   unsigned JTI = CurMF->getOrCreateJumpTableInfo(JTEncoding)
   2214                        ->createJumpTableIndex(DestBBs);
   2215 
   2216   // Set the jump table information so that we can codegen it as a second
   2217   // MachineBasicBlock
   2218   JumpTable JT(-1U, JTI, JumpTableBB, Default);
   2219   JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == SwitchBB));
   2220   if (CR.CaseBB == SwitchBB)
   2221     visitJumpTableHeader(JT, JTH, SwitchBB);
   2222 
   2223   JTCases.push_back(JumpTableBlock(JTH, JT));
   2224   return true;
   2225 }
   2226 
   2227 /// handleBTSplitSwitchCase - Emit a comparison and split the binary search
   2228 /// tree into two subtrees.
   2229 bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
   2230                                                   CaseRecVector& WorkList,
   2231                                                   const Value* SV,
   2232                                                   MachineBasicBlock* Default,
   2233                                                   MachineBasicBlock* SwitchBB) {
   2234   // Get the MachineFunction which holds the current MBB.  This is used when
   2235   // inserting any additional MBBs necessary to represent the switch.
   2236   MachineFunction *CurMF = FuncInfo.MF;
   2237 
   2238   // Figure out which block is immediately after the current one.
   2239   MachineFunction::iterator BBI = CR.CaseBB;
   2240   ++BBI;
   2241 
   2242   Case& FrontCase = *CR.Range.first;
   2243   Case& BackCase  = *(CR.Range.second-1);
   2244   const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
   2245 
   2246   // Size is the number of Cases represented by this range.
   2247   unsigned Size = CR.Range.second - CR.Range.first;
   2248 
   2249   const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
   2250   const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
   2251   double FMetric = 0;
   2252   CaseItr Pivot = CR.Range.first + Size/2;
   2253 
   2254   // Select optimal pivot, maximizing sum density of LHS and RHS. This will
   2255   // (heuristically) allow us to emit JumpTable's later.
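          // The metric used below is log2(gap between the two halves) times the
          // sum of their densities, so a wide gap separating two dense clusters
          // makes that split point more attractive.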
   2256   APInt TSize(First.getBitWidth(), 0);
   2257   for (CaseItr I = CR.Range.first, E = CR.Range.second;
   2258        I!=E; ++I)
   2259     TSize += I->size();
   2260 
   2261   APInt LSize = FrontCase.size();
   2262   APInt RSize = TSize-LSize;
   2263   DEBUG(dbgs() << "Selecting best pivot: \n"
   2264                << "First: " << First << ", Last: " << Last <<'\n'
   2265                << "LSize: " << LSize << ", RSize: " << RSize << '\n');
   2266   for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
   2267        J!=E; ++I, ++J) {
   2268     const APInt &LEnd = cast<ConstantInt>(I->High)->getValue();
   2269     const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue();
   2270     APInt Range = ComputeRange(LEnd, RBegin);
   2271     assert((Range - 2ULL).isNonNegative() &&
   2272            "Invalid case distance");
   2273     // Use volatile double here to avoid excess precision issues on some hosts,
   2274     // e.g. that use 80-bit X87 registers.
   2275     volatile double LDensity =
   2276        (double)LSize.roundToDouble() /
   2277                            (LEnd - First + 1ULL).roundToDouble();
   2278     volatile double RDensity =
   2279       (double)RSize.roundToDouble() /
   2280                            (Last - RBegin + 1ULL).roundToDouble();
   2281     double Metric = Range.logBase2()*(LDensity+RDensity);
   2282     // Should always split in some non-trivial place
   2283     DEBUG(dbgs() <<"=>Step\n"
   2284                  << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
   2285                  << "LDensity: " << LDensity
   2286                  << ", RDensity: " << RDensity << '\n'
   2287                  << "Metric: " << Metric << '\n');
   2288     if (FMetric < Metric) {
   2289       Pivot = J;
   2290       FMetric = Metric;
   2291       DEBUG(dbgs() << "Current metric set to: " << FMetric << '\n');
   2292     }
   2293 
   2294     LSize += J->size();
   2295     RSize -= J->size();
   2296   }
   2297 
   2298   const TargetLowering *TLI = TM.getTargetLowering();
   2299   if (areJTsAllowed(*TLI)) {
   2300     // If our case is dense we *really* should handle it earlier!
   2301     assert((FMetric > 0) && "Should handle dense range earlier!");
   2302   } else {
   2303     Pivot = CR.Range.first + Size/2;
   2304   }
   2305 
   2306   CaseRange LHSR(CR.Range.first, Pivot);
   2307   CaseRange RHSR(Pivot, CR.Range.second);
   2308   const Constant *C = Pivot->Low;
   2309   MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
   2310 
   2311   // We know that we branch to the LHS if the Value being switched on is
   2312   // less than the Pivot value, C.  We use this to optimize our binary
   2313   // tree a bit, by recognizing that if SV is greater than or equal to the
   2314   // LHS's Case Value, and that Case Value is exactly one less than the
   2315   // Pivot's Value, then we can branch directly to the LHS's Target,
   2316   // rather than creating a leaf node for it.
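          // For example (illustrative), if earlier comparisons established SV >= 4,
          // the single LHS case ends at 4, and the pivot C is 5, then taking the
          // SV < 5 branch implies SV == 4, so we can jump straight to that case's
          // block.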
   2317   if ((LHSR.second - LHSR.first) == 1 &&
   2318       LHSR.first->High == CR.GE &&
   2319       cast<ConstantInt>(C)->getValue() ==
   2320       (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
   2321     TrueBB = LHSR.first->BB;
   2322   } else {
   2323     TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
   2324     CurMF->insert(BBI, TrueBB);
   2325     WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
   2326 
   2327     // Put SV in a virtual register to make it available from the new blocks.
   2328     ExportFromCurrentBlock(SV);
   2329   }
   2330 
   2331   // Similar to the optimization above, if the Value being switched on is
   2332   // known to be less than the Constant CR.LT, and the current Case Value
   2333   // is CR.LT - 1, then we can branch directly to the target block for
   2334   // the current Case Value, rather than emitting a RHS leaf node for it.
   2335   if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
   2336       cast<ConstantInt>(RHSR.first->Low)->getValue() ==
   2337       (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
   2338     FalseBB = RHSR.first->BB;
   2339   } else {
   2340     FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
   2341     CurMF->insert(BBI, FalseBB);
   2342     WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
   2343 
   2344     // Put SV in a virtual register to make it available from the new blocks.
   2345     ExportFromCurrentBlock(SV);
   2346   }
   2347 
   2348   // Create a CaseBlock record representing a conditional branch to
   2349   // the LHS node if the value being switched on SV is less than C.
   2350   // Otherwise, branch to RHS.
   2351   CaseBlock CB(ISD::SETULT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
   2352 
   2353   if (CR.CaseBB == SwitchBB)
   2354     visitSwitchCase(CB, SwitchBB);
   2355   else
   2356     SwitchCases.push_back(CB);
   2357 
   2358   return true;
   2359 }
   2360 
   2361 /// handleBitTestsSwitchCase - If the current case range has few destinations
   2362 /// and its span is less than the machine word bitwidth, encode the case range
   2363 /// into a series of masks and emit bit tests with these masks.
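        /// For example (illustrative values): for 'switch (x)' with cases
        /// 0, 2, 4 -> A and 1, 5 -> B, the span 0..5 fits in a machine word, so
        /// the range can be encoded as the masks A = 0b010101 and B = 0b100010,
        /// and each destination is reached by testing ((1 << x) & Mask) != 0
        /// instead of emitting a chain of compares.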
   2364 bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
   2365                                                    CaseRecVector& WorkList,
   2366                                                    const Value* SV,
   2367                                                    MachineBasicBlock* Default,
   2368                                                    MachineBasicBlock* SwitchBB) {
   2369   const TargetLowering *TLI = TM.getTargetLowering();
   2370   EVT PTy = TLI->getPointerTy();
   2371   unsigned IntPtrBits = PTy.getSizeInBits();
   2372 
   2373   Case& FrontCase = *CR.Range.first;
   2374   Case& BackCase  = *(CR.Range.second-1);
   2375 
   2376   // Get the MachineFunction which holds the current MBB.  This is used when
   2377   // inserting any additional MBBs necessary to represent the switch.
   2378   MachineFunction *CurMF = FuncInfo.MF;
   2379 
   2380   // If the target does not have a legal shift left, do not emit bit tests at all.
   2381   if (!TLI->isOperationLegal(ISD::SHL, TLI->getPointerTy()))
   2382     return false;
   2383 
   2384   size_t numCmps = 0;
   2385   for (CaseItr I = CR.Range.first, E = CR.Range.second;
   2386        I!=E; ++I) {
   2387     // A single case counts as one comparison, a case range as two.
   2388     numCmps += (I->Low == I->High ? 1 : 2);
   2389   }
   2390 
   2391   // Count unique destinations
   2392   SmallSet<MachineBasicBlock*, 4> Dests;
   2393   for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
   2394     Dests.insert(I->BB);
   2395     if (Dests.size() > 3)
   2396       // Don't bother with the code below if there are too many unique destinations.
   2397       return false;
   2398   }
   2399   DEBUG(dbgs() << "Total number of unique destinations: "
   2400         << Dests.size() << '\n'
   2401         << "Total number of comparisons: " << numCmps << '\n');
   2402 
   2403   // Compute span of values.
   2404   const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
   2405   const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
   2406   APInt cmpRange = maxValue - minValue;
   2407 
   2408   DEBUG(dbgs() << "Compare range: " << cmpRange << '\n'
   2409                << "Low bound: " << minValue << '\n'
   2410                << "High bound: " << maxValue << '\n');
   2411 
   2412   if (cmpRange.uge(IntPtrBits) ||
   2413       (!(Dests.size() == 1 && numCmps >= 3) &&
   2414        !(Dests.size() == 2 && numCmps >= 5) &&
   2415        !(Dests.size() >= 3 && numCmps >= 6)))
   2416     return false;
   2417 
   2418   DEBUG(dbgs() << "Emitting bit tests\n");
   2419   APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
   2420 
   2421   // If all the case values already fit in a machine word without
   2422   // subtracting minValue, keep lowBound at zero and use maxValue as the
   2423   // compare range, avoiding the subtraction entirely.
   2424   if (maxValue.ult(IntPtrBits)) {
   2425     cmpRange = maxValue;
   2426   } else {
   2427     lowBound = minValue;
   2428   }
   2429 
   2430   CaseBitsVector CasesBits;
   2431   unsigned i, count = 0;
   2432 
   2433   for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
   2434     MachineBasicBlock* Dest = I->BB;
   2435     for (i = 0; i < count; ++i)
   2436       if (Dest == CasesBits[i].BB)
   2437         break;
   2438 
   2439     if (i == count) {
   2440       assert((count < 3) && "Too many destinations to test!");
   2441       CasesBits.push_back(CaseBits(0, Dest, 0, 0/*Weight*/));
   2442       count++;
   2443     }
   2444 
   2445     const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
   2446     const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
   2447 
   2448     uint64_t lo = (lowValue - lowBound).getZExtValue();
   2449     uint64_t hi = (highValue - lowBound).getZExtValue();
   2450     CasesBits[i].ExtraWeight += I->ExtraWeight;
   2451 
   2452     for (uint64_t j = lo; j <= hi; j++) {
   2453       CasesBits[i].Mask |=  1ULL << j;
   2454       CasesBits[i].Bits++;
   2455     }
   2456 
   2457   }
   2458   std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
   2459 
   2460   BitTestInfo BTC;
   2461 
   2462   // Figure out which block is immediately after the current one.
   2463   MachineFunction::iterator BBI = CR.CaseBB;
   2464   ++BBI;
   2465 
   2466   const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
   2467 
   2468   DEBUG(dbgs() << "Cases:\n");
   2469   for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
   2470     DEBUG(dbgs() << "Mask: " << CasesBits[i].Mask
   2471                  << ", Bits: " << CasesBits[i].Bits
   2472                  << ", BB: " << CasesBits[i].BB << '\n');
   2473 
   2474     MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
   2475     CurMF->insert(BBI, CaseBB);
   2476     BTC.push_back(BitTestCase(CasesBits[i].Mask,
   2477                               CaseBB,
   2478                               CasesBits[i].BB, CasesBits[i].ExtraWeight));
   2479 
   2480     // Put SV in a virtual register to make it available from the new blocks.
   2481     ExportFromCurrentBlock(SV);
   2482   }
   2483 
   2484   BitTestBlock BTB(lowBound, cmpRange, SV,
   2485                    -1U, MVT::Other, (CR.CaseBB == SwitchBB),
   2486                    CR.CaseBB, Default, BTC);
   2487 
   2488   if (CR.CaseBB == SwitchBB)
   2489     visitBitTestHeader(BTB, SwitchBB);
   2490 
   2491   BitTestCases.push_back(BTB);
   2492 
   2493   return true;
   2494 }
   2495 
   2496 /// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
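        /// For example (illustrative): the case list {1 -> A, 2 -> A, 3 -> A,
        /// 7 -> B} is clustered into the ranges [1, 3] -> A and [7, 7] -> B, and
        /// the returned comparison count is 3, since a range needs two compares
        /// and a single value needs one.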
   2497 size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
   2498                                        const SwitchInst& SI) {
   2499 
   2500   /// Use a shorter form of the declaration, and also make it clear that we
   2501   /// use IntegersSubsetMapping as the Clusterifier here.
   2502   typedef IntegersSubsetMapping<MachineBasicBlock> Clusterifier;
   2503 
   2504   Clusterifier TheClusterifier;
   2505 
   2506   BranchProbabilityInfo *BPI = FuncInfo.BPI;
   2507   // Start with "simple" cases
   2508   for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end();
   2509        i != e; ++i) {
   2510     const BasicBlock *SuccBB = i.getCaseSuccessor();
   2511     MachineBasicBlock *SMBB = FuncInfo.MBBMap[SuccBB];
   2512 
   2513     TheClusterifier.add(i.getCaseValueEx(), SMBB,
   2514         BPI ? BPI->getEdgeWeight(SI.getParent(), i.getSuccessorIndex()) : 0);
   2515   }
   2516 
   2517   TheClusterifier.optimize();
   2518 
   2519   size_t numCmps = 0;
   2520   for (Clusterifier::RangeIterator i = TheClusterifier.begin(),
   2521        e = TheClusterifier.end(); i != e; ++i, ++numCmps) {
   2522     Clusterifier::Cluster &C = *i;
   2523     // Update edge weight for the cluster.
   2524     unsigned W = C.first.Weight;
   2525 
   2526     // FIXME: Currently works with ConstantInt-based numbers.
   2527     // Changing it to be APInt-based is too heavy a change for this commit.
   2528     Cases.push_back(Case(C.first.getLow().toConstantInt(),
   2529                          C.first.getHigh().toConstantInt(), C.second, W));
   2530 
   2531     // A range counts double, since it requires two compares.
   2532     if (C.first.getLow() != C.first.getHigh())
   2533       ++numCmps;
   2534   }
   2535 
   2536   return numCmps;
   2537 }
   2538 
   2539 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
   2540                                            MachineBasicBlock *Last) {
   2541   // Update JTCases.
   2542   for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
   2543     if (JTCases[i].first.HeaderBB == First)
   2544       JTCases[i].first.HeaderBB = Last;
   2545 
   2546   // Update BitTestCases.
   2547   for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
   2548     if (BitTestCases[i].Parent == First)
   2549       BitTestCases[i].Parent = Last;
   2550 }
   2551 
   2552 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
   2553   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
   2554 
   2555   // Figure out which block is immediately after the current one.
   2556   MachineBasicBlock *NextBlock = 0;
   2557   MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
   2558 
   2559   // If there is only the default destination, branch to it if it is not the
   2560   // next basic block.  Otherwise, just fall through.
   2561   if (!SI.getNumCases()) {
   2562     // Update machine-CFG edges.
   2563 
   2564     // If this is not a fall-through branch, emit the branch.
   2565     SwitchMBB->addSuccessor(Default);
   2566     if (Default != NextBlock)
   2567       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
   2568                               MVT::Other, getControlRoot(),
   2569                               DAG.getBasicBlock(Default)));
   2570 
   2571     return;
   2572   }
   2573 
   2574   // If there are any non-default case statements, create a vector of Cases
   2575   // representing each one, and sort the vector so that we can efficiently
   2576   // create a binary search tree from them.
   2577   CaseVector Cases;
   2578   size_t numCmps = Clusterify(Cases, SI);
   2579   DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size()
   2580                << ". Total compares: " << numCmps << '\n');
   2581   (void)numCmps;
   2582 
   2583   // Get the Value to be switched on and default basic blocks, which will be
   2584   // inserted into CaseBlock records, representing basic blocks in the binary
   2585   // search tree.
   2586   const Value *SV = SI.getCondition();
   2587 
   2588   // Push the initial CaseRec onto the worklist
   2589   CaseRecVector WorkList;
   2590   WorkList.push_back(CaseRec(SwitchMBB,0,0,
   2591                              CaseRange(Cases.begin(),Cases.end())));
   2592 
   2593   while (!WorkList.empty()) {
   2594     // Grab a record representing a case range to process off the worklist
   2595     CaseRec CR = WorkList.back();
   2596     WorkList.pop_back();
   2597 
   2598     if (handleBitTestsSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
   2599       continue;
   2600 
   2601     // If the range has only a few cases (two or fewer), emit a series of
   2602     // specific tests.
   2603     if (handleSmallSwitchRange(CR, WorkList, SV, Default, SwitchMBB))
   2604       continue;
   2605 
   2606     // If the switch has more than N blocks, and is at least 40% dense, and the
   2607     // target supports indirect branches, then emit a jump table rather than
   2608     // lowering the switch to a binary tree of conditional branches.
   2609     // N defaults to 4 and is controlled via TLI.getMinimumJumpTableEntries().
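            // For example (illustrative values): five case values within the span
            // 10..17 give a density of 5/8, comfortably above 40%, and would
            // typically become a jump table, whereas the sparse cases
            // {1, 100, 10000} would not.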
   2610     if (handleJTSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
   2611       continue;
   2612 
   2613     // Emit a binary tree. We pick a pivot and push the left and right ranges
   2614     // onto the worklist. Leaves are handled via the handleSmallSwitchRange() call.
   2615     handleBTSplitSwitchCase(CR, WorkList, SV, Default, SwitchMBB);
   2616   }
   2617 }
   2618 
   2619 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
   2620   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
   2621 
   2622   // Update machine-CFG edges with unique successors.
   2623   SmallSet<BasicBlock*, 32> Done;
   2624   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
   2625     BasicBlock *BB = I.getSuccessor(i);
   2626     bool Inserted = Done.insert(BB);
   2627     if (!Inserted)
   2628         continue;
   2629 
   2630     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
   2631     addSuccessorWithWeight(IndirectBrMBB, Succ);
   2632   }
   2633 
   2634   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
   2635                           MVT::Other, getControlRoot(),
   2636                           getValue(I.getAddress())));
   2637 }
   2638 
   2639 void SelectionDAGBuilder::visitFSub(const User &I) {
   2640   // -0.0 - X --> fneg
   2641   Type *Ty = I.getType();
   2642   if (isa<Constant>(I.getOperand(0)) &&
   2643       I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
   2644     SDValue Op2 = getValue(I.getOperand(1));
   2645     setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
   2646                              Op2.getValueType(), Op2));
   2647     return;
   2648   }
   2649 
   2650   visitBinary(I, ISD::FSUB);
   2651 }
   2652 
   2653 void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
   2654   SDValue Op1 = getValue(I.getOperand(0));
   2655   SDValue Op2 = getValue(I.getOperand(1));
   2656   setValue(&I, DAG.getNode(OpCode, getCurSDLoc(),
   2657                            Op1.getValueType(), Op1, Op2));
   2658 }
   2659 
   2660 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
   2661   SDValue Op1 = getValue(I.getOperand(0));
   2662   SDValue Op2 = getValue(I.getOperand(1));
   2663 
   2664   EVT ShiftTy = TM.getTargetLowering()->getShiftAmountTy(Op2.getValueType());
   2665 
   2666   // Coerce the shift amount to the right type if we can.
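          // For example (illustrative): on a target whose shift-amount type is i8,
          // an i64 shift amount is truncated to i8 (which can still represent any
          // in-range shift amount), while an i1 shift amount is zero-extended to
          // i8.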
   2667   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
   2668     unsigned ShiftSize = ShiftTy.getSizeInBits();
   2669     unsigned Op2Size = Op2.getValueType().getSizeInBits();
   2670     SDLoc DL = getCurSDLoc();
   2671 
   2672     // If the operand is smaller than the shift count type, promote it.
   2673     if (ShiftSize > Op2Size)
   2674       Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
   2675 
   2676     // If the operand is larger than the shift count type but the shift
   2677     // count type has enough bits to represent any shift value, truncate
   2678     // it now. This is a common case and it exposes the truncate to
   2679     // optimization early.
   2680     else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
   2681       Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
   2682     // Otherwise we'll need to temporarily settle for some other convenient
   2683     // type.  Type legalization will make adjustments once the shiftee is split.
   2684     else
   2685       Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
   2686   }
   2687 
   2688   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(),
   2689                            Op1.getValueType(), Op1, Op2));
   2690 }
   2691 
   2692 void SelectionDAGBuilder::visitSDiv(const User &I) {
   2693   SDValue Op1 = getValue(I.getOperand(0));
   2694   SDValue Op2 = getValue(I.getOperand(1));
   2695 
   2696   // Turn exact SDivs into multiplications.
   2697   // FIXME: This should be in DAGCombiner, but it doesn't have access to the
   2698   // exact bit.
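          // For example (illustrative values): an exact i32 sdiv by 6 can be
          // lowered as an arithmetic shift right by 1 followed by a multiply by
          // 0xAAAAAAAB, the multiplicative inverse of 3 modulo 2^32, since the
          // exact flag guarantees no remainder is being discarded.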
   2699   if (isa<BinaryOperator>(&I) && cast<BinaryOperator>(&I)->isExact() &&
   2700       !isa<ConstantSDNode>(Op1) &&
   2701       isa<ConstantSDNode>(Op2) && !cast<ConstantSDNode>(Op2)->isNullValue())
   2702     setValue(&I, TM.getTargetLowering()->BuildExactSDIV(Op1, Op2,
   2703                                                         getCurSDLoc(), DAG));
   2704   else
   2705     setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(),
   2706                              Op1, Op2));
   2707 }
   2708 
   2709 void SelectionDAGBuilder::visitICmp(const User &I) {
   2710   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
   2711   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
   2712     predicate = IC->getPredicate();
   2713   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
   2714     predicate = ICmpInst::Predicate(IC->getPredicate());
   2715   SDValue Op1 = getValue(I.getOperand(0));
   2716   SDValue Op2 = getValue(I.getOperand(1));
   2717   ISD::CondCode Opcode = getICmpCondCode(predicate);
   2718 
   2719   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2720   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
   2721 }
   2722 
   2723 void SelectionDAGBuilder::visitFCmp(const User &I) {
   2724   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
   2725   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
   2726     predicate = FC->getPredicate();
   2727   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
   2728     predicate = FCmpInst::Predicate(FC->getPredicate());
   2729   SDValue Op1 = getValue(I.getOperand(0));
   2730   SDValue Op2 = getValue(I.getOperand(1));
   2731   ISD::CondCode Condition = getFCmpCondCode(predicate);
   2732   if (TM.Options.NoNaNsFPMath)
   2733     Condition = getFCmpCodeWithoutNaN(Condition);
   2734   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2735   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
   2736 }
   2737 
   2738 void SelectionDAGBuilder::visitSelect(const User &I) {
   2739   SmallVector<EVT, 4> ValueVTs;
   2740   ComputeValueVTs(*TM.getTargetLowering(), I.getType(), ValueVTs);
   2741   unsigned NumValues = ValueVTs.size();
   2742   if (NumValues == 0) return;
   2743 
   2744   SmallVector<SDValue, 4> Values(NumValues);
   2745   SDValue Cond     = getValue(I.getOperand(0));
   2746   SDValue TrueVal  = getValue(I.getOperand(1));
   2747   SDValue FalseVal = getValue(I.getOperand(2));
   2748   ISD::NodeType OpCode = Cond.getValueType().isVector() ?
   2749     ISD::VSELECT : ISD::SELECT;
   2750 
   2751   for (unsigned i = 0; i != NumValues; ++i)
   2752     Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
   2753                             TrueVal.getNode()->getValueType(TrueVal.getResNo()+i),
   2754                             Cond,
   2755                             SDValue(TrueVal.getNode(),
   2756                                     TrueVal.getResNo() + i),
   2757                             SDValue(FalseVal.getNode(),
   2758                                     FalseVal.getResNo() + i));
   2759 
   2760   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
   2761                            DAG.getVTList(&ValueVTs[0], NumValues),
   2762                            &Values[0], NumValues));
   2763 }
   2764 
   2765 void SelectionDAGBuilder::visitTrunc(const User &I) {
   2766   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
   2767   SDValue N = getValue(I.getOperand(0));
   2768   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2769   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
   2770 }
   2771 
   2772 void SelectionDAGBuilder::visitZExt(const User &I) {
   2773   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
   2774   // ZExt also can't be a cast to bool for the same reason. So, nothing much to do.
   2775   SDValue N = getValue(I.getOperand(0));
   2776   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2777   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
   2778 }
   2779 
   2780 void SelectionDAGBuilder::visitSExt(const User &I) {
   2781   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
   2782   // SExt also can't be a cast to bool for the same reason. So, nothing much to do.
   2783   SDValue N = getValue(I.getOperand(0));
   2784   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2785   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
   2786 }
   2787 
   2788 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
   2789   // FPTrunc is never a no-op cast, no need to check
   2790   SDValue N = getValue(I.getOperand(0));
   2791   const TargetLowering *TLI = TM.getTargetLowering();
   2792   EVT DestVT = TLI->getValueType(I.getType());
   2793   setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurSDLoc(),
   2794                            DestVT, N,
   2795                            DAG.getTargetConstant(0, TLI->getPointerTy())));
   2796 }
   2797 
   2798 void SelectionDAGBuilder::visitFPExt(const User &I) {
   2799   // FPExt is never a no-op cast, no need to check
   2800   SDValue N = getValue(I.getOperand(0));
   2801   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2802   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
   2803 }
   2804 
   2805 void SelectionDAGBuilder::visitFPToUI(const User &I) {
   2806   // FPToUI is never a no-op cast, no need to check
   2807   SDValue N = getValue(I.getOperand(0));
   2808   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2809   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
   2810 }
   2811 
   2812 void SelectionDAGBuilder::visitFPToSI(const User &I) {
   2813   // FPToSI is never a no-op cast, no need to check
   2814   SDValue N = getValue(I.getOperand(0));
   2815   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2816   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
   2817 }
   2818 
   2819 void SelectionDAGBuilder::visitUIToFP(const User &I) {
   2820   // UIToFP is never a no-op cast, no need to check
   2821   SDValue N = getValue(I.getOperand(0));
   2822   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2823   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
   2824 }
   2825 
   2826 void SelectionDAGBuilder::visitSIToFP(const User &I) {
   2827   // SIToFP is never a no-op cast, no need to check
   2828   SDValue N = getValue(I.getOperand(0));
   2829   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2830   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
   2831 }
   2832 
   2833 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
   2834   // What to do depends on the size of the integer and the size of the pointer.
   2835   // We can either truncate, zero extend, or no-op, accordingly.
   2836   SDValue N = getValue(I.getOperand(0));
   2837   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2838   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
   2839 }
   2840 
   2841 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
   2842   // What to do depends on the size of the integer and the size of the pointer.
   2843   // We can either truncate, zero extend, or no-op, accordingly.
   2844   SDValue N = getValue(I.getOperand(0));
   2845   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2846   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
   2847 }
   2848 
   2849 void SelectionDAGBuilder::visitBitCast(const User &I) {
   2850   SDValue N = getValue(I.getOperand(0));
   2851   EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
   2852 
   2853   // BitCast assures us that source and destination are the same size so this is
   2854   // either a BITCAST or a no-op.
   2855   if (DestVT != N.getValueType())
   2856     setValue(&I, DAG.getNode(ISD::BITCAST, getCurSDLoc(),
   2857                              DestVT, N)); // convert types.
   2858   else
   2859     setValue(&I, N);            // noop cast.
   2860 }
   2861 
   2862 void SelectionDAGBuilder::visitInsertElement(const User &I) {
   2863   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   2864   SDValue InVec = getValue(I.getOperand(0));
   2865   SDValue InVal = getValue(I.getOperand(1));
   2866   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)),
   2867                                      getCurSDLoc(), TLI.getVectorIdxTy());
   2868   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
   2869                            TM.getTargetLowering()->getValueType(I.getType()),
   2870                            InVec, InVal, InIdx));
   2871 }
   2872 
   2873 void SelectionDAGBuilder::visitExtractElement(const User &I) {
   2874   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   2875   SDValue InVec = getValue(I.getOperand(0));
   2876   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)),
   2877                                      getCurSDLoc(), TLI.getVectorIdxTy());
   2878   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
   2879                            TM.getTargetLowering()->getValueType(I.getType()),
   2880                            InVec, InIdx));
   2881 }
   2882 
   2883 // Utility for visitShuffleVector - Return true if every element in Mask,
   2884 // beginning at position Pos and ending at Pos+Size, falls within the
   2885 // specified sequential range [Low, Low+Size), or is undef.
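        // For example (illustrative): Mask = <4, 5, -1, 7> with Pos = 0, Size = 4
        // and Low = 4 returns true, since the undef (-1) element is ignored.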
   2886 static bool isSequentialInRange(const SmallVectorImpl<int> &Mask,
   2887                                 unsigned Pos, unsigned Size, int Low) {
   2888   for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
   2889     if (Mask[i] >= 0 && Mask[i] != Low)
   2890       return false;
   2891   return true;
   2892 }
   2893 
   2894 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
   2895   SDValue Src1 = getValue(I.getOperand(0));
   2896   SDValue Src2 = getValue(I.getOperand(1));
   2897 
   2898   SmallVector<int, 8> Mask;
   2899   ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
   2900   unsigned MaskNumElts = Mask.size();
   2901 
   2902   const TargetLowering *TLI = TM.getTargetLowering();
   2903   EVT VT = TLI->getValueType(I.getType());
   2904   EVT SrcVT = Src1.getValueType();
   2905   unsigned SrcNumElts = SrcVT.getVectorNumElements();
   2906 
   2907   if (SrcNumElts == MaskNumElts) {
   2908     setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
   2909                                       &Mask[0]));
   2910     return;
   2911   }
   2912 
   2913   // Normalize the shuffle vector since mask and vector length don't match.
   2914   if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
   2915     // The mask is longer than the source vectors and its length is a multiple
   2916     // of the source vector length.  We can use CONCAT_VECTORS to make the
   2917     // vector lengths match the mask length.
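            // For example (illustrative): shuffling two <4 x i32> sources with the
            // mask <0,1,2,3,4,5,6,7> concatenates Src1 and Src2, while
            // <4,5,6,7,0,1,2,3> concatenates them in the opposite order.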
   2918     if (SrcNumElts*2 == MaskNumElts) {
   2919       // First check for Src1 in low and Src2 in high
   2920       if (isSequentialInRange(Mask, 0, SrcNumElts, 0) &&
   2921           isSequentialInRange(Mask, SrcNumElts, SrcNumElts, SrcNumElts)) {
   2922         // The shuffle is concatenating two vectors together.
   2923         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(),
   2924                                  VT, Src1, Src2));
   2925         return;
   2926       }
   2927       // Then check for Src2 in low and Src1 in high
   2928       if (isSequentialInRange(Mask, 0, SrcNumElts, SrcNumElts) &&
   2929           isSequentialInRange(Mask, SrcNumElts, SrcNumElts, 0)) {
   2930         // The shuffle is concatenating two vectors together.
   2931         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(),
   2932                                  VT, Src2, Src1));
   2933         return;
   2934       }
   2935     }
   2936 
   2937     // Pad both vectors with undefs to make them the same length as the mask.
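            // For example (illustrative): two <2 x i32> sources with an 8-element
            // mask are each widened to <8 x i32> by concatenating three undef
            // vectors, and a mask index of 2 (the first element of Src2) is
            // remapped to 8, the first element of the widened Src2.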
   2938     unsigned NumConcat = MaskNumElts / SrcNumElts;
   2939     bool Src1U = Src1.getOpcode() == ISD::UNDEF;
   2940     bool Src2U = Src2.getOpcode() == ISD::UNDEF;
   2941     SDValue UndefVal = DAG.getUNDEF(SrcVT);
   2942 
   2943     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
   2944     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
   2945     MOps1[0] = Src1;
   2946     MOps2[0] = Src2;
   2947 
   2948     Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
   2949                                                   getCurSDLoc(), VT,
   2950                                                   &MOps1[0], NumConcat);
   2951     Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
   2952                                                   getCurSDLoc(), VT,
   2953                                                   &MOps2[0], NumConcat);
   2954 
   2955     // Readjust mask for new input vector length.
   2956     SmallVector<int, 8> MappedOps;
   2957     for (unsigned i = 0; i != MaskNumElts; ++i) {
   2958       int Idx = Mask[i];
   2959       if (Idx >= (int)SrcNumElts)
   2960         Idx -= SrcNumElts - MaskNumElts;
   2961       MappedOps.push_back(Idx);
   2962     }
   2963 
   2964     setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
   2965                                       &MappedOps[0]));
   2966     return;
   2967   }
   2968 
   2969   if (SrcNumElts > MaskNumElts) {
   2970     // Analyze the access pattern of the vector to see if we can extract
   2971     // two subvectors and do the shuffle. The analysis is done by calculating
   2972     // the range of elements the mask accesses on each source vector.
   2973     int MinRange[2] = { static_cast<int>(SrcNumElts),
   2974                         static_cast<int>(SrcNumElts)};
   2975     int MaxRange[2] = {-1, -1};
   2976 
   2977     for (unsigned i = 0; i != MaskNumElts; ++i) {
   2978       int Idx = Mask[i];
   2979       unsigned Input = 0;
   2980       if (Idx < 0)
   2981         continue;
   2982 
   2983       if (Idx >= (int)SrcNumElts) {
   2984         Input = 1;
   2985         Idx -= SrcNumElts;
   2986       }
   2987       if (Idx > MaxRange[Input])
   2988         MaxRange[Input] = Idx;
   2989       if (Idx < MinRange[Input])
   2990         MinRange[Input] = Idx;
   2991     }
   2992 
   2993     // Check whether the access is smaller than the vector size and whether
   2994     // we can find a reasonable extract index.
   2995     int RangeUse[2] = { -1, -1 };  // 0 = Unused, 1 = Extract,
   2996                                    // -1 = Cannot extract.
   2997     int StartIdx[2];  // StartIdx to extract from
   2998     for (unsigned Input = 0; Input < 2; ++Input) {
   2999       if (MinRange[Input] >= (int)SrcNumElts && MaxRange[Input] < 0) {
   3000         RangeUse[Input] = 0; // Unused
   3001         StartIdx[Input] = 0;
   3002         continue;
   3003       }
   3004 
   3005       // Find a good start index that is a multiple of the mask length. Then
   3006       // see if the rest of the elements are in range.
   3007       StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
   3008       if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
   3009           StartIdx[Input] + MaskNumElts <= SrcNumElts)
   3010         RangeUse[Input] = 1; // Extract from a multiple of the mask length.
   3011     }
   3012 
   3013     if (RangeUse[0] == 0 && RangeUse[1] == 0) {
   3014       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
   3015       return;
   3016     }
   3017     if (RangeUse[0] >= 0 && RangeUse[1] >= 0) {
   3018       // Extract appropriate subvector and generate a vector shuffle
   3019       for (unsigned Input = 0; Input < 2; ++Input) {
   3020         SDValue &Src = Input == 0 ? Src1 : Src2;
   3021         if (RangeUse[Input] == 0)
   3022           Src = DAG.getUNDEF(VT);
   3023         else
   3024           Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurSDLoc(), VT,
   3025                             Src, DAG.getConstant(StartIdx[Input],
   3026                                                  TLI->getVectorIdxTy()));
   3027       }
   3028 
   3029       // Calculate new mask.
   3030       SmallVector<int, 8> MappedOps;
   3031       for (unsigned i = 0; i != MaskNumElts; ++i) {
   3032         int Idx = Mask[i];
   3033         if (Idx >= 0) {
   3034           if (Idx < (int)SrcNumElts)
   3035             Idx -= StartIdx[0];
   3036           else
   3037             Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
   3038         }
   3039         MappedOps.push_back(Idx);
   3040       }
   3041 
   3042       setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
   3043                                         &MappedOps[0]));
   3044       return;
   3045     }
   3046   }
   3047 
   3048   // We can't use either concat vectors or extract subvectors, so fall
   3049   // back to replacing the shuffle with per-element extracts and a
   3050   // build vector.
   3051   EVT EltVT = VT.getVectorElementType();
   3052   EVT IdxVT = TLI->getVectorIdxTy();
   3053   SmallVector<SDValue,8> Ops;
   3054   for (unsigned i = 0; i != MaskNumElts; ++i) {
   3055     int Idx = Mask[i];
   3056     SDValue Res;
   3057 
   3058     if (Idx < 0) {
   3059       Res = DAG.getUNDEF(EltVT);
   3060     } else {
   3061       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
   3062       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
   3063 
   3064       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
   3065                         EltVT, Src, DAG.getConstant(Idx, IdxVT));
   3066     }
   3067 
   3068     Ops.push_back(Res);
   3069   }
   3070 
   3071   setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
   3072                            VT, &Ops[0], Ops.size()));
   3073 }
   3074 
   3075 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
   3076   const Value *Op0 = I.getOperand(0);
   3077   const Value *Op1 = I.getOperand(1);
   3078   Type *AggTy = I.getType();
   3079   Type *ValTy = Op1->getType();
   3080   bool IntoUndef = isa<UndefValue>(Op0);
   3081   bool FromUndef = isa<UndefValue>(Op1);
   3082 
   3083   unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
   3084 
   3085   const TargetLowering *TLI = TM.getTargetLowering();
   3086   SmallVector<EVT, 4> AggValueVTs;
   3087   ComputeValueVTs(*TLI, AggTy, AggValueVTs);
   3088   SmallVector<EVT, 4> ValValueVTs;
   3089   ComputeValueVTs(*TLI, ValTy, ValValueVTs);
   3090 
   3091   unsigned NumAggValues = AggValueVTs.size();
   3092   unsigned NumValValues = ValValueVTs.size();
   3093   SmallVector<SDValue, 4> Values(NumAggValues);
   3094 
   3095   SDValue Agg = getValue(Op0);
   3096   unsigned i = 0;
   3097   // Copy the beginning value(s) from the original aggregate.
   3098   for (; i != LinearIndex; ++i)
   3099     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
   3100                 SDValue(Agg.getNode(), Agg.getResNo() + i);
   3101   // Copy values from the inserted value(s).
   3102   if (NumValValues) {
   3103     SDValue Val = getValue(Op1);
   3104     for (; i != LinearIndex + NumValValues; ++i)
   3105       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
   3106                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
   3107   }
   3108   // Copy remaining value(s) from the original aggregate.
   3109   for (; i != NumAggValues; ++i)
   3110     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
   3111                 SDValue(Agg.getNode(), Agg.getResNo() + i);
   3112 
   3113   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
   3114                            DAG.getVTList(&AggValueVTs[0], NumAggValues),
   3115                            &Values[0], NumAggValues));
   3116 }
   3117 
   3118 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
   3119   const Value *Op0 = I.getOperand(0);
   3120   Type *AggTy = Op0->getType();
   3121   Type *ValTy = I.getType();
   3122   bool OutOfUndef = isa<UndefValue>(Op0);
   3123 
   3124   unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
   3125 
   3126   const TargetLowering *TLI = TM.getTargetLowering();
   3127   SmallVector<EVT, 4> ValValueVTs;
   3128   ComputeValueVTs(*TLI, ValTy, ValValueVTs);
   3129 
   3130   unsigned NumValValues = ValValueVTs.size();
   3131 
   3132   // Ignore an extractvalue that produces an empty object.
   3133   if (!NumValValues) {
   3134     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
   3135     return;
   3136   }
   3137 
   3138   SmallVector<SDValue, 4> Values(NumValValues);
   3139 
   3140   SDValue Agg = getValue(Op0);
   3141   // Copy out the selected value(s).
   3142   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
   3143     Values[i - LinearIndex] =
   3144       OutOfUndef ?
   3145         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
   3146         SDValue(Agg.getNode(), Agg.getResNo() + i);
   3147 
   3148   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
   3149                            DAG.getVTList(&ValValueVTs[0], NumValValues),
   3150                            &Values[0], NumValValues));
   3151 }
   3152 
   3153 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
   3154   SDValue N = getValue(I.getOperand(0));
   3155   // Note that the pointer operand may be a vector of pointers. Take the scalar
   3156   // element which holds a pointer.
   3157   Type *Ty = I.getOperand(0)->getType()->getScalarType();
   3158 
   3159   for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
   3160        OI != E; ++OI) {
   3161     const Value *Idx = *OI;
   3162     if (StructType *StTy = dyn_cast<StructType>(Ty)) {
   3163       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
   3164       if (Field) {
   3165         // N = N + Offset
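                // For example (illustrative): selecting field 1 of { i32, i64 }
                // adds that field's offset from the struct layout (8 bytes under
                // typical natural alignment) to the pointer.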
   3166         uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
   3167         N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N,
   3168                         DAG.getConstant(Offset, N.getValueType()));
   3169       }
   3170 
   3171       Ty = StTy->getElementType(Field);
   3172     } else {
   3173       Ty = cast<SequentialType>(Ty)->getElementType();
   3174 
   3175       // If this is a constant subscript, handle it quickly.
   3176       const TargetLowering *TLI = TM.getTargetLowering();
   3177       if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
   3178         if (CI->isZero()) continue;
   3179         uint64_t Offs =
   3180             TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
   3181         SDValue OffsVal;
   3182         EVT PTy = TLI->getPointerTy();
   3183         unsigned PtrBits = PTy.getSizeInBits();
   3184         if (PtrBits < 64)
   3185           OffsVal = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(),
   3186                                 TLI->getPointerTy(),
   3187                                 DAG.getConstant(Offs, MVT::i64));
   3188         else
   3189           OffsVal = DAG.getIntPtrConstant(Offs);
   3190 
   3191         N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N,
   3192                         OffsVal);
   3193         continue;
   3194       }
   3195 
   3196       // N = N + Idx * ElementSize;
   3197       APInt ElementSize = APInt(TLI->getPointerTy().getSizeInBits(),
   3198                                 TD->getTypeAllocSize(Ty));
   3199       SDValue IdxN = getValue(Idx);
   3200 
   3201       // If the index is smaller or larger than intptr_t, truncate or extend
   3202       // it.
   3203       IdxN = DAG.getSExtOrTrunc(IdxN, getCurSDLoc(), N.getValueType());
   3204 
   3205       // If this is a multiply by a power of two, turn it into a shl
   3206       // immediately.  This is a very common case.
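              // For example (illustrative): indexing an array of i32 on a 64-bit
              // target scales the index by 4, which is emitted as a left shift by
              // 2 rather than a multiply.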
   3207       if (ElementSize != 1) {
   3208         if (ElementSize.isPowerOf2()) {
   3209           unsigned Amt = ElementSize.logBase2();
   3210           IdxN = DAG.getNode(ISD::SHL, getCurSDLoc(),
   3211                              N.getValueType(), IdxN,
   3212                              DAG.getConstant(Amt, IdxN.getValueType()));
   3213         } else {
   3214           SDValue Scale = DAG.getConstant(ElementSize, IdxN.getValueType());
   3215           IdxN = DAG.getNode(ISD::MUL, getCurSDLoc(),
   3216                              N.getValueType(), IdxN, Scale);
   3217         }
   3218       }
   3219 
   3220       N = DAG.getNode(ISD::ADD, getCurSDLoc(),
   3221                       N.getValueType(), N, IdxN);
   3222     }
   3223   }
   3224 
   3225   setValue(&I, N);
   3226 }
   3227 
   3228 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
   3229   // If this is a fixed sized alloca in the entry block of the function,
   3230   // allocate it statically on the stack.
   3231   if (FuncInfo.StaticAllocaMap.count(&I))
   3232     return;   // getValue will auto-populate this.
   3233 
   3234   Type *Ty = I.getAllocatedType();
   3235   const TargetLowering *TLI = TM.getTargetLowering();
   3236   uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
   3237   unsigned Align =
   3238     std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
   3239              I.getAlignment());
   3240 
   3241   SDValue AllocSize = getValue(I.getArraySize());
   3242 
   3243   EVT IntPtr = TLI->getPointerTy();
   3244   if (AllocSize.getValueType() != IntPtr)
   3245     AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurSDLoc(), IntPtr);
   3246 
   3247   AllocSize = DAG.getNode(ISD::MUL, getCurSDLoc(), IntPtr,
   3248                           AllocSize,
   3249                           DAG.getConstant(TySize, IntPtr));
   3250 
   3251   // Handle alignment.  If the requested alignment is less than or equal to
   3252   // the stack alignment, ignore it.  If it is greater than the stack
   3253   // alignment, we note this in the DYNAMIC_STACKALLOC node.
   3254   unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
   3255   if (Align <= StackAlign)
   3256     Align = 0;
   3257 
   3258   // Round the size of the allocation up to the stack alignment size
   3259   // by adding StackAlign-1 to the size.
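          // For example (illustrative values): with a 16-byte stack alignment, a
          // 20-byte allocation becomes (20 + 15) & ~15 = 32 bytes after the
          // add-and-mask below.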
   3260   AllocSize = DAG.getNode(ISD::ADD, getCurSDLoc(),
   3261                           AllocSize.getValueType(), AllocSize,
   3262                           DAG.getIntPtrConstant(StackAlign-1));
   3263 
   3264   // Mask out the low bits for alignment purposes.
   3265   AllocSize = DAG.getNode(ISD::AND, getCurSDLoc(),
   3266                           AllocSize.getValueType(), AllocSize,
   3267                           DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
   3268 
   3269   SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
   3270   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
   3271   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurSDLoc(),
   3272                             VTs, Ops, 3);
   3273   setValue(&I, DSA);
   3274   DAG.setRoot(DSA.getValue(1));
   3275 
   3276   // Inform the Frame Information that we have just allocated a variable-sized
   3277   // object.
   3278   FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1);
   3279 }
   3280 
   3281 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
   3282   if (I.isAtomic())
   3283     return visitAtomicLoad(I);
   3284 
   3285   const Value *SV = I.getOperand(0);
   3286   SDValue Ptr = getValue(SV);
   3287 
   3288   Type *Ty = I.getType();
   3289 
   3290   bool isVolatile = I.isVolatile();
   3291   bool isNonTemporal = I.getMetadata("nontemporal") != 0;
   3292   bool isInvariant = I.getMetadata("invariant.load") != 0;
   3293   unsigned Alignment = I.getAlignment();
   3294   const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
   3295   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
   3296 
   3297   SmallVector<EVT, 4> ValueVTs;
   3298   SmallVector<uint64_t, 4> Offsets;
   3299   ComputeValueVTs(*TM.getTargetLowering(), Ty, ValueVTs, &Offsets);
   3300   unsigned NumValues = ValueVTs.size();
   3301   if (NumValues == 0)
   3302     return;
   3303 
   3304   SDValue Root;
   3305   bool ConstantMemory = false;
   3306   if (I.isVolatile() || NumValues > MaxParallelChains)
   3307     // Serialize volatile loads with other side effects.
   3308     Root = getRoot();
   3309   else if (AA->pointsToConstantMemory(
   3310              AliasAnalysis::Location(SV, AA->getTypeStoreSize(Ty), TBAAInfo))) {
   3311     // Do not serialize (non-volatile) loads of constant memory with anything.
   3312     Root = DAG.getEntryNode();
   3313     ConstantMemory = true;
   3314   } else {
   3315     // Do not serialize non-volatile loads against each other.
   3316     Root = DAG.getRoot();
   3317   }
   3318 
   3319   SmallVector<SDValue, 4> Values(NumValues);
   3320   SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
   3321                                           NumValues));
   3322   EVT PtrVT = Ptr.getValueType();
   3323   unsigned ChainI = 0;
   3324   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
   3325     // Serializing loads here may result in excessive register pressure, and
   3326     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
   3327     // could recover a bit by hoisting nodes upward in the chain by recognizing
   3328     // they are side-effect free or do not alias. The optimizer should really
   3329     // avoid this case by converting large object/array copies to llvm.memcpy
   3330     // (MaxParallelChains should always remain as a failsafe).
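            // For example (illustrative): a load of a first-class aggregate with
            // more scalar pieces than MaxParallelChains is emitted in groups; each
            // full group is folded into a TokenFactor that becomes the root chain
            // for the next group.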
   3331     if (ChainI == MaxParallelChains) {
   3332       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
   3333       SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
   3334                                   MVT::Other, &Chains[0], ChainI);
   3335       Root = Chain;
   3336       ChainI = 0;
   3337     }
   3338     SDValue A = DAG.getNode(ISD::ADD, getCurSDLoc(),
   3339                             PtrVT, Ptr,
   3340                             DAG.getConstant(Offsets[i], PtrVT));
   3341     SDValue L = DAG.getLoad(ValueVTs[i], getCurSDLoc(), Root,
   3342                             A, MachinePointerInfo(SV, Offsets[i]), isVolatile,
   3343                             isNonTemporal, isInvariant, Alignment, TBAAInfo,
   3344                             Ranges);
   3345 
   3346     Values[i] = L;
   3347     Chains[ChainI] = L.getValue(1);
   3348   }
   3349 
   3350   if (!ConstantMemory) {
   3351     SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
   3352                                 MVT::Other, &Chains[0], ChainI);
   3353     if (isVolatile)
   3354       DAG.setRoot(Chain);
   3355     else
   3356       PendingLoads.push_back(Chain);
   3357   }
   3358 
   3359   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
   3360                            DAG.getVTList(&ValueVTs[0], NumValues),
   3361                            &Values[0], NumValues));
   3362 }
   3363 
   3364 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
   3365   if (I.isAtomic())
   3366     return visitAtomicStore(I);
   3367 
   3368   const Value *SrcV = I.getOperand(0);
   3369   const Value *PtrV = I.getOperand(1);
   3370 
   3371   SmallVector<EVT, 4> ValueVTs;
   3372   SmallVector<uint64_t, 4> Offsets;
   3373   ComputeValueVTs(*TM.getTargetLowering(), SrcV->getType(), ValueVTs, &Offsets);
   3374   unsigned NumValues = ValueVTs.size();
   3375   if (NumValues == 0)
   3376     return;
   3377 
   3378   // Get the lowered operands. Note that we do this after
   3379   // checking if NumValues is zero, because with zero values
   3380   // the operands won't have values in the map.
   3381   SDValue Src = getValue(SrcV);
   3382   SDValue Ptr = getValue(PtrV);
   3383 
   3384   SDValue Root = getRoot();
   3385   SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
   3386                                           NumValues));
   3387   EVT PtrVT = Ptr.getValueType();
   3388   bool isVolatile = I.isVolatile();
   3389   bool isNonTemporal = I.getMetadata("nontemporal") != 0;
   3390   unsigned Alignment = I.getAlignment();
   3391   const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
   3392 
   3393   unsigned ChainI = 0;
   3394   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
   3395     // See visitLoad comments.
   3396     if (ChainI == MaxParallelChains) {
   3397       SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
   3398                                   MVT::Other, &Chains[0], ChainI);
   3399       Root = Chain;
   3400       ChainI = 0;
   3401     }
   3402     SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(), PtrVT, Ptr,
   3403                               DAG.getConstant(Offsets[i], PtrVT));
   3404     SDValue St = DAG.getStore(Root, getCurSDLoc(),
   3405                               SDValue(Src.getNode(), Src.getResNo() + i),
   3406                               Add, MachinePointerInfo(PtrV, Offsets[i]),
   3407                               isVolatile, isNonTemporal, Alignment, TBAAInfo);
   3408     Chains[ChainI] = St;
   3409   }
   3410 
   3411   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
   3412                                   MVT::Other, &Chains[0], ChainI);
   3413   DAG.setRoot(StoreNode);
   3414 }
   3415 
   3416 static SDValue InsertFenceForAtomic(SDValue Chain, AtomicOrdering Order,
   3417                                     SynchronizationScope Scope,
   3418                                     bool Before, SDLoc dl,
   3419                                     SelectionDAG &DAG,
   3420                                     const TargetLowering &TLI) {
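          // For example (illustrative): for an acq_rel atomic operation on a target
          // that requests explicit fences, the callers below use this to emit a
          // release fence before the operation and an acquire fence after it, and
          // issue the operation itself with monotonic ordering.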
   3421   // Fence, if necessary
   3422   if (Before) {
   3423     if (Order == AcquireRelease || Order == SequentiallyConsistent)
   3424       Order = Release;
   3425     else if (Order == Acquire || Order == Monotonic)
   3426       return Chain;
   3427   } else {
   3428     if (Order == AcquireRelease)
   3429       Order = Acquire;
   3430     else if (Order == Release || Order == Monotonic)
   3431       return Chain;
   3432   }
   3433   SDValue Ops[3];
   3434   Ops[0] = Chain;
   3435   Ops[1] = DAG.getConstant(Order, TLI.getPointerTy());
   3436   Ops[2] = DAG.getConstant(Scope, TLI.getPointerTy());
   3437   return DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3);
   3438 }
   3439 
   3440 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
   3441   SDLoc dl = getCurSDLoc();
   3442   AtomicOrdering Order = I.getOrdering();
   3443   SynchronizationScope Scope = I.getSynchScope();
   3444 
   3445   SDValue InChain = getRoot();
   3446 
   3447   const TargetLowering *TLI = TM.getTargetLowering();
   3448   if (TLI->getInsertFencesForAtomic())
   3449     InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
   3450                                    DAG, *TLI);
   3451 
   3452   SDValue L =
   3453     DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl,
   3454                   getValue(I.getCompareOperand()).getValueType().getSimpleVT(),
   3455                   InChain,
   3456                   getValue(I.getPointerOperand()),
   3457                   getValue(I.getCompareOperand()),
   3458                   getValue(I.getNewValOperand()),
   3459                   MachinePointerInfo(I.getPointerOperand()), 0 /* Alignment */,
   3460                   TLI->getInsertFencesForAtomic() ? Monotonic : Order,
   3461                   Scope);
   3462 
   3463   SDValue OutChain = L.getValue(1);
   3464 
   3465   if (TLI->getInsertFencesForAtomic())
   3466     OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
   3467                                     DAG, *TLI);
   3468 
   3469   setValue(&I, L);
   3470   DAG.setRoot(OutChain);
   3471 }
   3472 
   3473 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
   3474   SDLoc dl = getCurSDLoc();
   3475   ISD::NodeType NT;
   3476   switch (I.getOperation()) {
   3477   default: llvm_unreachable("Unknown atomicrmw operation");
   3478   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
   3479   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
   3480   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
   3481   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
   3482   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
   3483   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
   3484   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
   3485   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
   3486   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
   3487   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
   3488   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
   3489   }
   3490   AtomicOrdering Order = I.getOrdering();
   3491   SynchronizationScope Scope = I.getSynchScope();
   3492 
   3493   SDValue InChain = getRoot();
   3494 
   3495   const TargetLowering *TLI = TM.getTargetLowering();
   3496   if (TLI->getInsertFencesForAtomic())
   3497     InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
   3498                                    DAG, *TLI);
   3499 
   3500   SDValue L =
   3501     DAG.getAtomic(NT, dl,
   3502                   getValue(I.getValOperand()).getValueType().getSimpleVT(),
   3503                   InChain,
   3504                   getValue(I.getPointerOperand()),
   3505                   getValue(I.getValOperand()),
   3506                   I.getPointerOperand(), 0 /* Alignment */,
   3507                   TLI->getInsertFencesForAtomic() ? Monotonic : Order,
   3508                   Scope);
   3509 
   3510   SDValue OutChain = L.getValue(1);
   3511 
   3512   if (TLI->getInsertFencesForAtomic())
   3513     OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
   3514                                     DAG, *TLI);
   3515 
   3516   setValue(&I, L);
   3517   DAG.setRoot(OutChain);
   3518 }
   3519 
   3520 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
   3521   SDLoc dl = getCurSDLoc();
   3522   const TargetLowering *TLI = TM.getTargetLowering();
   3523   SDValue Ops[3];
   3524   Ops[0] = getRoot();
   3525   Ops[1] = DAG.getConstant(I.getOrdering(), TLI->getPointerTy());
   3526   Ops[2] = DAG.getConstant(I.getSynchScope(), TLI->getPointerTy());
   3527   DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3));
   3528 }
   3529 
   3530 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
   3531   SDLoc dl = getCurSDLoc();
   3532   AtomicOrdering Order = I.getOrdering();
   3533   SynchronizationScope Scope = I.getSynchScope();
   3534 
   3535   SDValue InChain = getRoot();
   3536 
   3537   const TargetLowering *TLI = TM.getTargetLowering();
   3538   EVT VT = TLI->getValueType(I.getType());
   3539 
   3540   if (I.getAlignment() < VT.getSizeInBits() / 8)
   3541     report_fatal_error("Cannot generate unaligned atomic load");
   3542 
   3543   SDValue L =
   3544     DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
   3545                   getValue(I.getPointerOperand()),
   3546                   I.getPointerOperand(), I.getAlignment(),
   3547                   TLI->getInsertFencesForAtomic() ? Monotonic : Order,
   3548                   Scope);
   3549 
   3550   SDValue OutChain = L.getValue(1);
   3551 
   3552   if (TLI->getInsertFencesForAtomic())
   3553     OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
   3554                                     DAG, *TLI);
   3555 
   3556   setValue(&I, L);
   3557   DAG.setRoot(OutChain);
   3558 }
   3559 
   3560 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
   3561   SDLoc dl = getCurSDLoc();
   3562 
   3563   AtomicOrdering Order = I.getOrdering();
   3564   SynchronizationScope Scope = I.getSynchScope();
   3565 
   3566   SDValue InChain = getRoot();
   3567 
   3568   const TargetLowering *TLI = TM.getTargetLowering();
   3569   EVT VT = TLI->getValueType(I.getValueOperand()->getType());
   3570 
   3571   if (I.getAlignment() < VT.getSizeInBits() / 8)
   3572     report_fatal_error("Cannot generate unaligned atomic store");
   3573 
   3574   if (TLI->getInsertFencesForAtomic())
   3575     InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
   3576                                    DAG, *TLI);
   3577 
   3578   SDValue OutChain =
   3579     DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
   3580                   InChain,
   3581                   getValue(I.getPointerOperand()),
   3582                   getValue(I.getValueOperand()),
   3583                   I.getPointerOperand(), I.getAlignment(),
   3584                   TLI->getInsertFencesForAtomic() ? Monotonic : Order,
   3585                   Scope);
   3586 
   3587   if (TLI->getInsertFencesForAtomic())
   3588     OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
   3589                                     DAG, *TLI);
   3590 
   3591   DAG.setRoot(OutChain);
   3592 }
   3593 
   3594 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
   3595 /// node.
   3596 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
   3597                                                unsigned Intrinsic) {
   3598   bool HasChain = !I.doesNotAccessMemory();
   3599   bool OnlyLoad = HasChain && I.onlyReadsMemory();
   3600 
   3601   // Build the operand list.
   3602   SmallVector<SDValue, 8> Ops;
   3603   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
   3604     if (OnlyLoad) {
   3605       // We don't need to serialize loads against other loads.
   3606       Ops.push_back(DAG.getRoot());
   3607     } else {
   3608       Ops.push_back(getRoot());
   3609     }
   3610   }
   3611 
    3612   // Info is set by getTgtMemIntrinsic.
   3613   TargetLowering::IntrinsicInfo Info;
   3614   const TargetLowering *TLI = TM.getTargetLowering();
   3615   bool IsTgtIntrinsic = TLI->getTgtMemIntrinsic(Info, I, Intrinsic);
   3616 
    3617   // Add the intrinsic ID as an integer operand unless the node will use a
           // target-specific memory opcode that already identifies the intrinsic.
   3618   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
   3619       Info.opc == ISD::INTRINSIC_W_CHAIN)
   3620     Ops.push_back(DAG.getTargetConstant(Intrinsic, TLI->getPointerTy()));
   3621 
   3622   // Add all operands of the call to the operand list.
   3623   for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
   3624     SDValue Op = getValue(I.getArgOperand(i));
   3625     Ops.push_back(Op);
   3626   }
   3627 
   3628   SmallVector<EVT, 4> ValueVTs;
   3629   ComputeValueVTs(*TLI, I.getType(), ValueVTs);
   3630 
   3631   if (HasChain)
   3632     ValueVTs.push_back(MVT::Other);
   3633 
   3634   SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
   3635 
   3636   // Create the node.
   3637   SDValue Result;
   3638   if (IsTgtIntrinsic) {
    3639     // This is a target intrinsic that touches memory.
   3640     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(),
   3641                                      VTs, &Ops[0], Ops.size(),
   3642                                      Info.memVT,
   3643                                    MachinePointerInfo(Info.ptrVal, Info.offset),
   3644                                      Info.align, Info.vol,
   3645                                      Info.readMem, Info.writeMem);
   3646   } else if (!HasChain) {
   3647     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(),
   3648                          VTs, &Ops[0], Ops.size());
   3649   } else if (!I.getType()->isVoidTy()) {
   3650     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(),
   3651                          VTs, &Ops[0], Ops.size());
   3652   } else {
   3653     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(),
   3654                          VTs, &Ops[0], Ops.size());
   3655   }
   3656 
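           // When the node carries a chain, it is always the last result.  Read-only
           // intrinsics only feed PendingLoads so that independent loads are not
           // serialized against each other.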
   3657   if (HasChain) {
   3658     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
   3659     if (OnlyLoad)
   3660       PendingLoads.push_back(Chain);
   3661     else
   3662       DAG.setRoot(Chain);
   3663   }
   3664 
   3665   if (!I.getType()->isVoidTy()) {
   3666     if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
   3667       EVT VT = TLI->getValueType(PTy);
   3668       Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
   3669     }
   3670 
   3671     setValue(&I, Result);
   3672   }
   3673 }
   3674 
   3675 /// GetSignificand - Get the significand and build it into a floating-point
   3676 /// number with exponent of 1:
   3677 ///
   3678 ///   Op = (Op & 0x007fffff) | 0x3f800000;
   3679 ///
    3680 /// where Op is the i32 bit representation of the floating-point value.
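         ///
         /// For example, Op = 0x40490fdb (3.14159274f) gives
         /// (0x00490fdb | 0x3f800000) = 0x3fc90fdb, which is 1.57079637f:
         /// the significand of pi, now in [1,2).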
   3681 static SDValue
   3682 GetSignificand(SelectionDAG &DAG, SDValue Op, SDLoc dl) {
   3683   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
   3684                            DAG.getConstant(0x007fffff, MVT::i32));
   3685   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
   3686                            DAG.getConstant(0x3f800000, MVT::i32));
   3687   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
   3688 }
   3689 
   3690 /// GetExponent - Get the exponent:
   3691 ///
   3692 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
   3693 ///
    3694 /// where Op is the i32 bit representation of the floating-point value.
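         ///
         /// For example, for Op = 0x40490fdb (3.14159274f) the biased exponent
         /// field is 0x80 = 128, so the result is (float)(128 - 127) = 1.0f.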
   3695 static SDValue
   3696 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
   3697             SDLoc dl) {
   3698   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
   3699                            DAG.getConstant(0x7f800000, MVT::i32));
   3700   SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
   3701                            DAG.getConstant(23, TLI.getPointerTy()));
   3702   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
   3703                            DAG.getConstant(127, MVT::i32));
   3704   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
   3705 }
   3706 
   3707 /// getF32Constant - Get 32-bit floating point constant.
   3708 static SDValue
   3709 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
   3710   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle, APInt(32, Flt)),
   3711                            MVT::f32);
   3712 }
   3713 
   3714 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
   3715 /// limited-precision mode.
   3716 static SDValue expandExp(SDLoc dl, SDValue Op, SelectionDAG &DAG,
   3717                          const TargetLowering &TLI) {
   3718   if (Op.getValueType() == MVT::f32 &&
   3719       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
   3720 
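             // exp(X) is computed as 2^(X * log2(e)): the integer part of X*log2(e)
             // is added straight into the exponent bits of the result, and
             // 2^fraction is approximated by one of the polynomials below.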
   3721     // Put the exponent in the right bit position for later addition to the
   3722     // final result:
   3723     //
   3724     //   #define LOG2OFe 1.4426950f
   3725     //   IntegerPartOfX = ((int32_t)(X * LOG2OFe));
   3726     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
   3727                              getF32Constant(DAG, 0x3fb8aa3b));
   3728     SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
   3729 
   3730     //   FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
   3731     SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
   3732     SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
   3733 
   3734     //   IntegerPartOfX <<= 23;
   3735     IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
   3736                                  DAG.getConstant(23, TLI.getPointerTy()));
   3737 
   3738     SDValue TwoToFracPartOfX;
   3739     if (LimitFloatPrecision <= 6) {
   3740       // For floating-point precision of 6:
   3741       //
   3742       //   TwoToFractionalPartOfX =
   3743       //     0.997535578f +
   3744       //       (0.735607626f + 0.252464424f * x) * x;
   3745       //
   3746       // error 0.0144103317, which is 6 bits
   3747       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3748                                getF32Constant(DAG, 0x3e814304));
   3749       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   3750                                getF32Constant(DAG, 0x3f3c50c8));
   3751       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3752       TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3753                                      getF32Constant(DAG, 0x3f7f5e7e));
   3754     } else if (LimitFloatPrecision <= 12) {
   3755       // For floating-point precision of 12:
   3756       //
   3757       //   TwoToFractionalPartOfX =
   3758       //     0.999892986f +
   3759       //       (0.696457318f +
   3760       //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
   3761       //
   3762       // 0.000107046256 error, which is 13 to 14 bits
   3763       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3764                                getF32Constant(DAG, 0x3da235e3));
   3765       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   3766                                getF32Constant(DAG, 0x3e65b8f3));
   3767       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3768       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3769                                getF32Constant(DAG, 0x3f324b07));
   3770       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   3771       TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   3772                                      getF32Constant(DAG, 0x3f7ff8fd));
   3773     } else { // LimitFloatPrecision <= 18
   3774       // For floating-point precision of 18:
   3775       //
   3776       //   TwoToFractionalPartOfX =
   3777       //     0.999999982f +
   3778       //       (0.693148872f +
   3779       //         (0.240227044f +
   3780       //           (0.554906021e-1f +
   3781       //             (0.961591928e-2f +
   3782       //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
   3783       //
   3784       // error 2.47208000*10^(-7), which is better than 18 bits
   3785       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3786                                getF32Constant(DAG, 0x3924b03e));
   3787       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   3788                                getF32Constant(DAG, 0x3ab24b87));
   3789       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3790       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3791                                getF32Constant(DAG, 0x3c1d8c17));
   3792       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   3793       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   3794                                getF32Constant(DAG, 0x3d634a1d));
   3795       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
   3796       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
   3797                                getF32Constant(DAG, 0x3e75fe14));
   3798       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
   3799       SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
   3800                                 getF32Constant(DAG, 0x3f317234));
   3801       SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
   3802       TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
   3803                                      getF32Constant(DAG, 0x3f800000));
   3804     }
   3805 
   3806     // Add the exponent into the result in integer domain.
   3807     SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFracPartOfX);
   3808     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
   3809                        DAG.getNode(ISD::ADD, dl, MVT::i32,
   3810                                    t13, IntegerPartOfX));
   3811   }
   3812 
   3813   // No special expansion.
   3814   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
   3815 }
   3816 
   3817 /// expandLog - Lower a log intrinsic. Handles the special sequences for
   3818 /// limited-precision mode.
   3819 static SDValue expandLog(SDLoc dl, SDValue Op, SelectionDAG &DAG,
   3820                          const TargetLowering &TLI) {
   3821   if (Op.getValueType() == MVT::f32 &&
   3822       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
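             // log(X) = (exponent of X) * log(2) + log(significand), with the
             // significand renormalized into [1,2) and its log approximated by a
             // minimax polynomial.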
   3823     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
   3824 
   3825     // Scale the exponent by log(2) [0.69314718f].
   3826     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
   3827     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
   3828                                         getF32Constant(DAG, 0x3f317218));
   3829 
   3830     // Get the significand and build it into a floating-point number with
   3831     // exponent of 1.
   3832     SDValue X = GetSignificand(DAG, Op1, dl);
   3833 
   3834     SDValue LogOfMantissa;
   3835     if (LimitFloatPrecision <= 6) {
   3836       // For floating-point precision of 6:
   3837       //
   3838       //   LogofMantissa =
   3839       //     -1.1609546f +
   3840       //       (1.4034025f - 0.23903021f * x) * x;
   3841       //
   3842       // error 0.0034276066, which is better than 8 bits
   3843       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3844                                getF32Constant(DAG, 0xbe74c456));
   3845       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   3846                                getF32Constant(DAG, 0x3fb3a2b1));
   3847       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   3848       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   3849                                   getF32Constant(DAG, 0x3f949a29));
   3850     } else if (LimitFloatPrecision <= 12) {
   3851       // For floating-point precision of 12:
   3852       //
   3853       //   LogOfMantissa =
   3854       //     -1.7417939f +
   3855       //       (2.8212026f +
   3856       //         (-1.4699568f +
   3857       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
   3858       //
   3859       // error 0.000061011436, which is 14 bits
   3860       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3861                                getF32Constant(DAG, 0xbd67b6d6));
   3862       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   3863                                getF32Constant(DAG, 0x3ee4f4b8));
   3864       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   3865       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   3866                                getF32Constant(DAG, 0x3fbc278b));
   3867       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3868       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3869                                getF32Constant(DAG, 0x40348e95));
   3870       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   3871       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
   3872                                   getF32Constant(DAG, 0x3fdef31a));
   3873     } else { // LimitFloatPrecision <= 18
   3874       // For floating-point precision of 18:
   3875       //
   3876       //   LogOfMantissa =
   3877       //     -2.1072184f +
   3878       //       (4.2372794f +
   3879       //         (-3.7029485f +
   3880       //           (2.2781945f +
   3881       //             (-0.87823314f +
   3882       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
   3883       //
   3884       // error 0.0000023660568, which is better than 18 bits
   3885       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3886                                getF32Constant(DAG, 0xbc91e5ac));
   3887       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   3888                                getF32Constant(DAG, 0x3e4350aa));
   3889       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   3890       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   3891                                getF32Constant(DAG, 0x3f60d3e3));
   3892       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3893       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3894                                getF32Constant(DAG, 0x4011cdf0));
   3895       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   3896       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
   3897                                getF32Constant(DAG, 0x406cfd1c));
   3898       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
   3899       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
   3900                                getF32Constant(DAG, 0x408797cb));
   3901       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
   3902       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
   3903                                   getF32Constant(DAG, 0x4006dcab));
   3904     }
   3905 
   3906     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
   3907   }
   3908 
   3909   // No special expansion.
   3910   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
   3911 }
   3912 
   3913 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
   3914 /// limited-precision mode.
   3915 static SDValue expandLog2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
   3916                           const TargetLowering &TLI) {
   3917   if (Op.getValueType() == MVT::f32 &&
   3918       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
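             // log2(X) = exponent of X + log2(significand); for base 2 the exponent
             // needs no scaling.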
   3919     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
   3920 
   3921     // Get the exponent.
   3922     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
   3923 
   3924     // Get the significand and build it into a floating-point number with
   3925     // exponent of 1.
   3926     SDValue X = GetSignificand(DAG, Op1, dl);
   3927 
   3928     // Different possible minimax approximations of significand in
   3929     // floating-point for various degrees of accuracy over [1,2].
   3930     SDValue Log2ofMantissa;
   3931     if (LimitFloatPrecision <= 6) {
   3932       // For floating-point precision of 6:
   3933       //
   3934       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
   3935       //
   3936       // error 0.0049451742, which is more than 7 bits
   3937       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3938                                getF32Constant(DAG, 0xbeb08fe0));
   3939       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   3940                                getF32Constant(DAG, 0x40019463));
   3941       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   3942       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   3943                                    getF32Constant(DAG, 0x3fd6633d));
   3944     } else if (LimitFloatPrecision <= 12) {
   3945       // For floating-point precision of 12:
   3946       //
   3947       //   Log2ofMantissa =
   3948       //     -2.51285454f +
   3949       //       (4.07009056f +
   3950       //         (-2.12067489f +
   3951       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
   3952       //
   3953       // error 0.0000876136000, which is better than 13 bits
   3954       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3955                                getF32Constant(DAG, 0xbda7262e));
   3956       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   3957                                getF32Constant(DAG, 0x3f25280b));
   3958       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   3959       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   3960                                getF32Constant(DAG, 0x4007b923));
   3961       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3962       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3963                                getF32Constant(DAG, 0x40823e2f));
   3964       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   3965       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
   3966                                    getF32Constant(DAG, 0x4020d29c));
   3967     } else { // LimitFloatPrecision <= 18
   3968       // For floating-point precision of 18:
   3969       //
   3970       //   Log2ofMantissa =
   3971       //     -3.0400495f +
   3972       //       (6.1129976f +
   3973       //         (-5.3420409f +
   3974       //           (3.2865683f +
   3975       //             (-1.2669343f +
   3976       //               (0.27515199f -
   3977       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
   3978       //
   3979       // error 0.0000018516, which is better than 18 bits
   3980       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3981                                getF32Constant(DAG, 0xbcd2769e));
   3982       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   3983                                getF32Constant(DAG, 0x3e8ce0b9));
   3984       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   3985       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   3986                                getF32Constant(DAG, 0x3fa22ae7));
   3987       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3988       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3989                                getF32Constant(DAG, 0x40525723));
   3990       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   3991       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
   3992                                getF32Constant(DAG, 0x40aaf200));
   3993       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
   3994       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
   3995                                getF32Constant(DAG, 0x40c39dad));
   3996       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
   3997       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
   3998                                    getF32Constant(DAG, 0x4042902c));
   3999     }
   4000 
   4001     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
   4002   }
   4003 
   4004   // No special expansion.
   4005   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
   4006 }
   4007 
   4008 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
   4009 /// limited-precision mode.
   4010 static SDValue expandLog10(SDLoc dl, SDValue Op, SelectionDAG &DAG,
   4011                            const TargetLowering &TLI) {
   4012   if (Op.getValueType() == MVT::f32 &&
   4013       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
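             // log10(X) = (exponent of X) * log10(2) + log10(significand).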
   4014     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
   4015 
   4016     // Scale the exponent by log10(2) [0.30102999f].
   4017     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
   4018     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
   4019                                         getF32Constant(DAG, 0x3e9a209a));
   4020 
   4021     // Get the significand and build it into a floating-point number with
   4022     // exponent of 1.
   4023     SDValue X = GetSignificand(DAG, Op1, dl);
   4024 
   4025     SDValue Log10ofMantissa;
   4026     if (LimitFloatPrecision <= 6) {
   4027       // For floating-point precision of 6:
   4028       //
   4029       //   Log10ofMantissa =
   4030       //     -0.50419619f +
   4031       //       (0.60948995f - 0.10380950f * x) * x;
   4032       //
   4033       // error 0.0014886165, which is 6 bits
   4034       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4035                                getF32Constant(DAG, 0xbdd49a13));
   4036       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   4037                                getF32Constant(DAG, 0x3f1c0789));
   4038       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   4039       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   4040                                     getF32Constant(DAG, 0x3f011300));
   4041     } else if (LimitFloatPrecision <= 12) {
   4042       // For floating-point precision of 12:
   4043       //
   4044       //   Log10ofMantissa =
   4045       //     -0.64831180f +
   4046       //       (0.91751397f +
   4047       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
   4048       //
   4049       // error 0.00019228036, which is better than 12 bits
   4050       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4051                                getF32Constant(DAG, 0x3d431f31));
   4052       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
   4053                                getF32Constant(DAG, 0x3ea21fb2));
   4054       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   4055       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4056                                getF32Constant(DAG, 0x3f6ae232));
   4057       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4058       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
   4059                                     getF32Constant(DAG, 0x3f25f7c3));
   4060     } else { // LimitFloatPrecision <= 18
   4061       // For floating-point precision of 18:
   4062       //
   4063       //   Log10ofMantissa =
   4064       //     -0.84299375f +
   4065       //       (1.5327582f +
   4066       //         (-1.0688956f +
   4067       //           (0.49102474f +
   4068       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
   4069       //
   4070       // error 0.0000037995730, which is better than 18 bits
   4071       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4072                                getF32Constant(DAG, 0x3c5d51ce));
   4073       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
   4074                                getF32Constant(DAG, 0x3e00685a));
   4075       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   4076       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4077                                getF32Constant(DAG, 0x3efb6798));
   4078       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4079       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
   4080                                getF32Constant(DAG, 0x3f88d192));
   4081       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   4082       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   4083                                getF32Constant(DAG, 0x3fc4316c));
   4084       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
   4085       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
   4086                                     getF32Constant(DAG, 0x3f57ce70));
   4087     }
   4088 
   4089     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
   4090   }
   4091 
   4092   // No special expansion.
   4093   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
   4094 }
   4095 
   4096 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
   4097 /// limited-precision mode.
   4098 static SDValue expandExp2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
   4099                           const TargetLowering &TLI) {
   4100   if (Op.getValueType() == MVT::f32 &&
   4101       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
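             // exp2(X): the integer part of X goes straight into the exponent field
             // (via the shift by 23 below) and 2^fraction is approximated by the
             // same polynomials used in expandExp.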
   4102     SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
   4103 
   4104     //   FractionalPartOfX = x - (float)IntegerPartOfX;
   4105     SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
   4106     SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
   4107 
   4108     //   IntegerPartOfX <<= 23;
   4109     IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
   4110                                  DAG.getConstant(23, TLI.getPointerTy()));
   4111 
   4112     SDValue TwoToFractionalPartOfX;
   4113     if (LimitFloatPrecision <= 6) {
   4114       // For floating-point precision of 6:
   4115       //
   4116       //   TwoToFractionalPartOfX =
   4117       //     0.997535578f +
   4118       //       (0.735607626f + 0.252464424f * x) * x;
   4119       //
   4120       // error 0.0144103317, which is 6 bits
   4121       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4122                                getF32Constant(DAG, 0x3e814304));
   4123       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4124                                getF32Constant(DAG, 0x3f3c50c8));
   4125       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4126       TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   4127                                            getF32Constant(DAG, 0x3f7f5e7e));
   4128     } else if (LimitFloatPrecision <= 12) {
   4129       // For floating-point precision of 12:
   4130       //
   4131       //   TwoToFractionalPartOfX =
   4132       //     0.999892986f +
   4133       //       (0.696457318f +
   4134       //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
   4135       //
   4136       // error 0.000107046256, which is 13 to 14 bits
   4137       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4138                                getF32Constant(DAG, 0x3da235e3));
   4139       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4140                                getF32Constant(DAG, 0x3e65b8f3));
   4141       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4142       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   4143                                getF32Constant(DAG, 0x3f324b07));
   4144       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   4145       TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   4146                                            getF32Constant(DAG, 0x3f7ff8fd));
   4147     } else { // LimitFloatPrecision <= 18
   4148       // For floating-point precision of 18:
   4149       //
   4150       //   TwoToFractionalPartOfX =
   4151       //     0.999999982f +
   4152       //       (0.693148872f +
   4153       //         (0.240227044f +
   4154       //           (0.554906021e-1f +
   4155       //             (0.961591928e-2f +
   4156       //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
   4157       // error 2.47208000*10^(-7), which is better than 18 bits
   4158       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4159                                getF32Constant(DAG, 0x3924b03e));
   4160       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4161                                getF32Constant(DAG, 0x3ab24b87));
   4162       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4163       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   4164                                getF32Constant(DAG, 0x3c1d8c17));
   4165       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   4166       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   4167                                getF32Constant(DAG, 0x3d634a1d));
   4168       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
   4169       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
   4170                                getF32Constant(DAG, 0x3e75fe14));
   4171       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
   4172       SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
   4173                                 getF32Constant(DAG, 0x3f317234));
   4174       SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
   4175       TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
   4176                                            getF32Constant(DAG, 0x3f800000));
   4177     }
   4178 
   4179     // Add the exponent into the result in integer domain.
   4180     SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32,
   4181                               TwoToFractionalPartOfX);
   4182     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
   4183                        DAG.getNode(ISD::ADD, dl, MVT::i32,
   4184                                    t13, IntegerPartOfX));
   4185   }
   4186 
   4187   // No special expansion.
   4188   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
   4189 }
   4190 
    4191 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
    4192 /// limited-precision mode when the first operand is 10.0f.
   4193 static SDValue expandPow(SDLoc dl, SDValue LHS, SDValue RHS,
   4194                          SelectionDAG &DAG, const TargetLowering &TLI) {
   4195   bool IsExp10 = false;
    4196   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
   4197       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
   4198     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
   4199       APFloat Ten(10.0f);
   4200       IsExp10 = LHSC->isExactlyValue(Ten);
   4201     }
   4202   }
   4203 
   4204   if (IsExp10) {
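             // pow(10.0f, x) == 2^(x * log2(10)), so this reuses the exp2-style
             // lowering with the argument pre-scaled by log2(10) ~= 3.3219281.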
   4205     // Put the exponent in the right bit position for later addition to the
   4206     // final result:
   4207     //
   4208     //   #define LOG2OF10 3.3219281f
   4209     //   IntegerPartOfX = (int32_t)(x * LOG2OF10);
   4210     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
   4211                              getF32Constant(DAG, 0x40549a78));
   4212     SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
   4213 
   4214     //   FractionalPartOfX = x - (float)IntegerPartOfX;
    4215     //   FractionalPartOfX = (x * LOG2OF10) - (float)IntegerPartOfX;
   4216     SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
   4217 
   4218     //   IntegerPartOfX <<= 23;
   4219     IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
   4220                                  DAG.getConstant(23, TLI.getPointerTy()));
   4221 
   4222     SDValue TwoToFractionalPartOfX;
   4223     if (LimitFloatPrecision <= 6) {
   4224       // For floating-point precision of 6:
   4225       //
    4226       //   TwoToFractionalPartOfX =
   4227       //     0.997535578f +
   4228       //       (0.735607626f + 0.252464424f * x) * x;
   4229       //
   4230       // error 0.0144103317, which is 6 bits
   4231       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4232                                getF32Constant(DAG, 0x3e814304));
   4233       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4234                                getF32Constant(DAG, 0x3f3c50c8));
   4235       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4236       TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   4237                                            getF32Constant(DAG, 0x3f7f5e7e));
   4238     } else if (LimitFloatPrecision <= 12) {
   4239       // For floating-point precision of 12:
   4240       //
   4241       //   TwoToFractionalPartOfX =
   4242       //     0.999892986f +
   4243       //       (0.696457318f +
   4244       //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
   4245       //
   4246       // error 0.000107046256, which is 13 to 14 bits
   4247       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4248                                getF32Constant(DAG, 0x3da235e3));
   4249       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4250                                getF32Constant(DAG, 0x3e65b8f3));
   4251       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4252       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   4253                                getF32Constant(DAG, 0x3f324b07));
   4254       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   4255       TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   4256                                            getF32Constant(DAG, 0x3f7ff8fd));
   4257     } else { // LimitFloatPrecision <= 18
   4258       // For floating-point precision of 18:
   4259       //
   4260       //   TwoToFractionalPartOfX =
   4261       //     0.999999982f +
   4262       //       (0.693148872f +
   4263       //         (0.240227044f +
   4264       //           (0.554906021e-1f +
   4265       //             (0.961591928e-2f +
   4266       //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
   4267       // error 2.47208000*10^(-7), which is better than 18 bits
   4268       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4269                                getF32Constant(DAG, 0x3924b03e));
   4270       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4271                                getF32Constant(DAG, 0x3ab24b87));
   4272       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4273       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   4274                                getF32Constant(DAG, 0x3c1d8c17));
   4275       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   4276       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   4277                                getF32Constant(DAG, 0x3d634a1d));
   4278       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
   4279       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
   4280                                getF32Constant(DAG, 0x3e75fe14));
   4281       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
   4282       SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
   4283                                 getF32Constant(DAG, 0x3f317234));
   4284       SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
   4285       TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
   4286                                            getF32Constant(DAG, 0x3f800000));
   4287     }
   4288 
   4289     SDValue t13 = DAG.getNode(ISD::BITCAST, dl,MVT::i32,TwoToFractionalPartOfX);
   4290     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
   4291                        DAG.getNode(ISD::ADD, dl, MVT::i32,
   4292                                    t13, IntegerPartOfX));
   4293   }
   4294 
   4295   // No special expansion.
   4296   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
   4297 }
   4298 
   4299 
   4300 /// ExpandPowI - Expand a llvm.powi intrinsic.
   4301 static SDValue ExpandPowI(SDLoc DL, SDValue LHS, SDValue RHS,
   4302                           SelectionDAG &DAG) {
   4303   // If RHS is a constant, we can expand this out to a multiplication tree,
    4304   // otherwise we end up lowering to a call to __powidf2 (for example).  When
    4305   // optimizing for size, we only expand when that yields a small number of
    4306   // multiplies; when not optimizing for size we always do the full expansion.
   4307   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
   4308     // Get the exponent as a positive value.
   4309     unsigned Val = RHSC->getSExtValue();
   4310     if ((int)Val < 0) Val = -Val;
   4311 
   4312     // powi(x, 0) -> 1.0
   4313     if (Val == 0)
   4314       return DAG.getConstantFP(1.0, LHS.getValueType());
   4315 
   4316     const Function *F = DAG.getMachineFunction().getFunction();
   4317     if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
   4318                                          Attribute::OptimizeForSize) ||
   4319         // If optimizing for size, don't insert too many multiplies.  This
   4320         // inserts up to 5 multiplies.
   4321         CountPopulation_32(Val)+Log2_32(Val) < 7) {
   4322       // We use the simple binary decomposition method to generate the multiply
   4323       // sequence.  There are more optimal ways to do this (for example,
   4324       // powi(x,15) generates one more multiply than it should), but this has
   4325       // the benefit of being both really simple and much better than a libcall.
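               // For example, powi(x, 13): 13 = 0b1101, so the loop folds x, x^4 and
               // x^8 into x^13 using three squarings plus two combining multiplies
               // (the final squaring of CurSquare is never used by the result).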
   4326       SDValue Res;  // Logically starts equal to 1.0
   4327       SDValue CurSquare = LHS;
   4328       while (Val) {
   4329         if (Val & 1) {
   4330           if (Res.getNode())
   4331             Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
   4332           else
   4333             Res = CurSquare;  // 1.0*CurSquare.
   4334         }
   4335 
   4336         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
   4337                                 CurSquare, CurSquare);
   4338         Val >>= 1;
   4339       }
   4340 
   4341       // If the original was negative, invert the result, producing 1/(x*x*x).
   4342       if (RHSC->getSExtValue() < 0)
   4343         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
   4344                           DAG.getConstantFP(1.0, LHS.getValueType()), Res);
   4345       return Res;
   4346     }
   4347   }
   4348 
   4349   // Otherwise, expand to a libcall.
   4350   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
   4351 }
   4352 
    4353 // getTruncatedArgReg - Find the underlying register used for a truncated
   4354 // argument.
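         // It matches the pattern trunc(assert[sz]ext(CopyFromReg reg)), which shows
         // up when an argument is promoted to a wider register and then truncated
         // back to its IR type.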
   4355 static unsigned getTruncatedArgReg(const SDValue &N) {
   4356   if (N.getOpcode() != ISD::TRUNCATE)
   4357     return 0;
   4358 
   4359   const SDValue &Ext = N.getOperand(0);
   4360   if (Ext.getOpcode() == ISD::AssertZext ||
   4361       Ext.getOpcode() == ISD::AssertSext) {
   4362     const SDValue &CFR = Ext.getOperand(0);
   4363     if (CFR.getOpcode() == ISD::CopyFromReg)
   4364       return cast<RegisterSDNode>(CFR.getOperand(1))->getReg();
   4365     if (CFR.getOpcode() == ISD::TRUNCATE)
   4366       return getTruncatedArgReg(CFR);
   4367   }
   4368   return 0;
   4369 }
   4370 
   4371 /// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function
   4372 /// argument, create the corresponding DBG_VALUE machine instruction for it now.
    4373 /// At the end of instruction selection, they will be inserted into the entry BB.
   4374 bool
   4375 SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
   4376                                               int64_t Offset,
   4377                                               const SDValue &N) {
   4378   const Argument *Arg = dyn_cast<Argument>(V);
   4379   if (!Arg)
   4380     return false;
   4381 
   4382   MachineFunction &MF = DAG.getMachineFunction();
   4383   const TargetInstrInfo *TII = DAG.getTarget().getInstrInfo();
   4384 
   4385   // Ignore inlined function arguments here.
   4386   DIVariable DV(Variable);
   4387   if (DV.isInlinedFnArgument(MF.getFunction()))
   4388     return false;
   4389 
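           // The location is tried in order: a frame index recorded during argument
           // lowering, the (virtual) register behind N, the ValueMap entry for V,
           // and finally a frame index found when N is a load from a stack slot.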
   4390   Optional<MachineOperand> Op;
    4391   // The frame index of some arguments is recorded during argument lowering.
   4392   if (int FI = FuncInfo.getArgumentFrameIndex(Arg))
   4393     Op = MachineOperand::CreateFI(FI);
   4394 
   4395   if (!Op && N.getNode()) {
   4396     unsigned Reg;
   4397     if (N.getOpcode() == ISD::CopyFromReg)
   4398       Reg = cast<RegisterSDNode>(N.getOperand(1))->getReg();
   4399     else
   4400       Reg = getTruncatedArgReg(N);
   4401     if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
   4402       MachineRegisterInfo &RegInfo = MF.getRegInfo();
   4403       unsigned PR = RegInfo.getLiveInPhysReg(Reg);
   4404       if (PR)
   4405         Reg = PR;
   4406     }
   4407     if (Reg)
   4408       Op = MachineOperand::CreateReg(Reg, false);
   4409   }
   4410 
   4411   if (!Op) {
   4412     // Check if ValueMap has reg number.
   4413     DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
   4414     if (VMI != FuncInfo.ValueMap.end())
   4415       Op = MachineOperand::CreateReg(VMI->second, false);
   4416   }
   4417 
   4418   if (!Op && N.getNode())
   4419     // Check if frame index is available.
   4420     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
   4421       if (FrameIndexSDNode *FINode =
   4422           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
   4423         Op = MachineOperand::CreateFI(FINode->getIndex());
   4424 
   4425   if (!Op)
   4426     return false;
   4427 
   4428   // FIXME: This does not handle register-indirect values at offset 0.
   4429   bool IsIndirect = Offset != 0;
   4430   if (Op->isReg())
   4431     FuncInfo.ArgDbgValues.push_back(BuildMI(MF, getCurDebugLoc(),
   4432                                             TII->get(TargetOpcode::DBG_VALUE),
   4433                                             IsIndirect,
   4434                                             Op->getReg(), Offset, Variable));
   4435   else
   4436     FuncInfo.ArgDbgValues.push_back(
   4437       BuildMI(MF, getCurDebugLoc(), TII->get(TargetOpcode::DBG_VALUE))
   4438           .addOperand(*Op).addImm(Offset).addMetadata(Variable));
   4439 
   4440   return true;
   4441 }
   4442 
   4443 // VisualStudio defines setjmp as _setjmp
   4444 #if defined(_MSC_VER) && defined(setjmp) && \
   4445                          !defined(setjmp_undefined_for_msvc)
   4446 #  pragma push_macro("setjmp")
   4447 #  undef setjmp
   4448 #  define setjmp_undefined_for_msvc
   4449 #endif
   4450 
   4451 /// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
    4452 /// we want to emit this as a call to a named external function, return the name;
   4453 /// otherwise lower it and return null.
   4454 const char *
   4455 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
   4456   const TargetLowering *TLI = TM.getTargetLowering();
   4457   SDLoc sdl = getCurSDLoc();
   4458   DebugLoc dl = getCurDebugLoc();
   4459   SDValue Res;
   4460 
   4461   switch (Intrinsic) {
   4462   default:
   4463     // By default, turn this into a target intrinsic node.
   4464     visitTargetIntrinsic(I, Intrinsic);
   4465     return 0;
   4466   case Intrinsic::vastart:  visitVAStart(I); return 0;
   4467   case Intrinsic::vaend:    visitVAEnd(I); return 0;
   4468   case Intrinsic::vacopy:   visitVACopy(I); return 0;
   4469   case Intrinsic::returnaddress:
   4470     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, TLI->getPointerTy(),
   4471                              getValue(I.getArgOperand(0))));
   4472     return 0;
   4473   case Intrinsic::frameaddress:
   4474     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, TLI->getPointerTy(),
   4475                              getValue(I.getArgOperand(0))));
   4476     return 0;
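           // The string literals below serve double duty: indexing past the leading
           // '_' yields "setjmp"/"longjmp" for targets that do not use the
           // underscore-prefixed variants.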
   4477   case Intrinsic::setjmp:
   4478     return &"_setjmp"[!TLI->usesUnderscoreSetJmp()];
   4479   case Intrinsic::longjmp:
   4480     return &"_longjmp"[!TLI->usesUnderscoreLongJmp()];
   4481   case Intrinsic::memcpy: {
   4482     // Assert for address < 256 since we support only user defined address
   4483     // spaces.
   4484     assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
   4485            < 256 &&
   4486            cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
   4487            < 256 &&
   4488            "Unknown address space");
   4489     SDValue Op1 = getValue(I.getArgOperand(0));
   4490     SDValue Op2 = getValue(I.getArgOperand(1));
   4491     SDValue Op3 = getValue(I.getArgOperand(2));
   4492     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
   4493     if (!Align)
   4494       Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment.
   4495     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
   4496     DAG.setRoot(DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, false,
   4497                               MachinePointerInfo(I.getArgOperand(0)),
   4498                               MachinePointerInfo(I.getArgOperand(1))));
   4499     return 0;
   4500   }
   4501   case Intrinsic::memset: {
   4502     // Assert for address < 256 since we support only user defined address
   4503     // spaces.
   4504     assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
   4505            < 256 &&
   4506            "Unknown address space");
   4507     SDValue Op1 = getValue(I.getArgOperand(0));
   4508     SDValue Op2 = getValue(I.getArgOperand(1));
   4509     SDValue Op3 = getValue(I.getArgOperand(2));
   4510     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
   4511     if (!Align)
   4512       Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment.
   4513     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
   4514     DAG.setRoot(DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
   4515                               MachinePointerInfo(I.getArgOperand(0))));
   4516     return 0;
   4517   }
   4518   case Intrinsic::memmove: {
   4519     // Assert for address < 256 since we support only user defined address
   4520     // spaces.
   4521     assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
   4522            < 256 &&
   4523            cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
   4524            < 256 &&
   4525            "Unknown address space");
   4526     SDValue Op1 = getValue(I.getArgOperand(0));
   4527     SDValue Op2 = getValue(I.getArgOperand(1));
   4528     SDValue Op3 = getValue(I.getArgOperand(2));
   4529     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
   4530     if (!Align)
   4531       Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment.
   4532     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
   4533     DAG.setRoot(DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
   4534                                MachinePointerInfo(I.getArgOperand(0)),
   4535                                MachinePointerInfo(I.getArgOperand(1))));
   4536     return 0;
   4537   }
   4538   case Intrinsic::dbg_declare: {
   4539     const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
   4540     MDNode *Variable = DI.getVariable();
   4541     const Value *Address = DI.getAddress();
   4542     DIVariable DIVar(Variable);
   4543     assert((!DIVar || DIVar.isVariable()) &&
   4544       "Variable in DbgDeclareInst should be either null or a DIVariable.");
   4545     if (!Address || !DIVar) {
   4546       DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
   4547       return 0;
   4548     }
   4549 
   4550     // Check if address has undef value.
   4551     if (isa<UndefValue>(Address) ||
   4552         (Address->use_empty() && !isa<Argument>(Address))) {
   4553       DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
   4554       return 0;
   4555     }
   4556 
   4557     SDValue &N = NodeMap[Address];
   4558     if (!N.getNode() && isa<Argument>(Address))
   4559       // Check unused arguments map.
   4560       N = UnusedArgNodeMap[Address];
   4561     SDDbgValue *SDV;
   4562     if (N.getNode()) {
   4563       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
   4564         Address = BCI->getOperand(0);
   4565       // Parameters are handled specially.
   4566       bool isParameter =
   4567         (DIVariable(Variable).getTag() == dwarf::DW_TAG_arg_variable ||
   4568          isa<Argument>(Address));
   4569 
   4570       const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
   4571 
   4572       if (isParameter && !AI) {
   4573         FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
   4574         if (FINode)
   4575           // Byval parameter.  We have a frame index at this point.
   4576           SDV = DAG.getDbgValue(Variable, FINode->getIndex(),
   4577                                 0, dl, SDNodeOrder);
   4578         else {
   4579           // Address is an argument, so try to emit its dbg value using
   4580           // virtual register info from the FuncInfo.ValueMap.
   4581           EmitFuncArgumentDbgValue(Address, Variable, 0, N);
   4582           return 0;
   4583         }
   4584       } else if (AI)
   4585         SDV = DAG.getDbgValue(Variable, N.getNode(), N.getResNo(),
   4586                               0, dl, SDNodeOrder);
   4587       else {
   4588         // Can't do anything with other non-AI cases yet.
   4589         DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
   4590         DEBUG(dbgs() << "non-AllocaInst issue for Address: \n\t");
   4591         DEBUG(Address->dump());
   4592         return 0;
   4593       }
   4594       DAG.AddDbgValue(SDV, N.getNode(), isParameter);
   4595     } else {
   4596       // If Address is an argument then try to emit its dbg value using
   4597       // virtual register info from the FuncInfo.ValueMap.
   4598       if (!EmitFuncArgumentDbgValue(Address, Variable, 0, N)) {
    4599         // If the variable is pinned by an alloca in a dominating basic
    4600         // block, use StaticAllocaMap.
   4601         if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
   4602           if (AI->getParent() != DI.getParent()) {
   4603             DenseMap<const AllocaInst*, int>::iterator SI =
   4604               FuncInfo.StaticAllocaMap.find(AI);
   4605             if (SI != FuncInfo.StaticAllocaMap.end()) {
   4606               SDV = DAG.getDbgValue(Variable, SI->second,
   4607                                     0, dl, SDNodeOrder);
   4608               DAG.AddDbgValue(SDV, 0, false);
   4609               return 0;
   4610             }
   4611           }
   4612         }
   4613         DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
   4614       }
   4615     }
   4616     return 0;
   4617   }
   4618   case Intrinsic::dbg_value: {
   4619     const DbgValueInst &DI = cast<DbgValueInst>(I);
   4620     DIVariable DIVar(DI.getVariable());
   4621     assert((!DIVar || DIVar.isVariable()) &&
   4622       "Variable in DbgValueInst should be either null or a DIVariable.");
   4623     if (!DIVar)
   4624       return 0;
   4625 
   4626     MDNode *Variable = DI.getVariable();
   4627     uint64_t Offset = DI.getOffset();
   4628     const Value *V = DI.getValue();
   4629     if (!V)
   4630       return 0;
   4631 
   4632     SDDbgValue *SDV;
   4633     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
   4634       SDV = DAG.getDbgValue(Variable, V, Offset, dl, SDNodeOrder);
   4635       DAG.AddDbgValue(SDV, 0, false);
   4636     } else {
   4637       // Do not use getValue() in here; we don't want to generate code at
   4638       // this point if it hasn't been done yet.
   4639       SDValue N = NodeMap[V];
   4640       if (!N.getNode() && isa<Argument>(V))
   4641         // Check unused arguments map.
   4642         N = UnusedArgNodeMap[V];
   4643       if (N.getNode()) {
   4644         if (!EmitFuncArgumentDbgValue(V, Variable, Offset, N)) {
   4645           SDV = DAG.getDbgValue(Variable, N.getNode(),
   4646                                 N.getResNo(), Offset, dl, SDNodeOrder);
   4647           DAG.AddDbgValue(SDV, N.getNode(), false);
   4648         }
    4649       } else if (!V->use_empty()) {
   4650         // Do not call getValue(V) yet, as we don't want to generate code.
   4651         // Remember it for later.
   4652         DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
   4653         DanglingDebugInfoMap[V] = DDI;
   4654       } else {
   4655         // We may expand this to cover more cases.  One case where we have no
   4656         // data available is an unreferenced parameter.
   4657         DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
   4658       }
   4659     }
   4660 
   4661     // Build a debug info table entry.
   4662     if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
   4663       V = BCI->getOperand(0);
   4664     const AllocaInst *AI = dyn_cast<AllocaInst>(V);
   4665     // Don't handle byval struct arguments or VLAs, for example.
   4666     if (!AI) {
   4667       DEBUG(dbgs() << "Dropping debug location info for:\n  " << DI << "\n");
   4668       DEBUG(dbgs() << "  Last seen at:\n    " << *V << "\n");
   4669       return 0;
   4670     }
   4671     DenseMap<const AllocaInst*, int>::iterator SI =
   4672       FuncInfo.StaticAllocaMap.find(AI);
   4673     if (SI == FuncInfo.StaticAllocaMap.end())
   4674       return 0; // VLAs.
   4675     int FI = SI->second;
   4676 
   4677     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
   4678     if (!DI.getDebugLoc().isUnknown() && MMI.hasDebugInfo())
   4679       MMI.setVariableDbgInfo(Variable, FI, DI.getDebugLoc());
   4680     return 0;
   4681   }
   4682 
   4683   case Intrinsic::eh_typeid_for: {
   4684     // Find the type id for the given typeinfo.
   4685     GlobalVariable *GV = ExtractTypeInfo(I.getArgOperand(0));
   4686     unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV);
   4687     Res = DAG.getConstant(TypeID, MVT::i32);
   4688     setValue(&I, Res);
   4689     return 0;
   4690   }
   4691 
   4692   case Intrinsic::eh_return_i32:
   4693   case Intrinsic::eh_return_i64:
   4694     DAG.getMachineFunction().getMMI().setCallsEHReturn(true);
   4695     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
   4696                             MVT::Other,
   4697                             getControlRoot(),
   4698                             getValue(I.getArgOperand(0)),
   4699                             getValue(I.getArgOperand(1))));
   4700     return 0;
   4701   case Intrinsic::eh_unwind_init:
   4702     DAG.getMachineFunction().getMMI().setCallsUnwindInit(true);
   4703     return 0;
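          // The CFA is computed below as FRAMEADDR(0) plus the target's
          // frame-to-arguments offset plus the (sign-extended or truncated)
          // operand of the intrinsic.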
   4704   case Intrinsic::eh_dwarf_cfa: {
   4705     SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), sdl,
   4706                                         TLI->getPointerTy());
   4707     SDValue Offset = DAG.getNode(ISD::ADD, sdl,
   4708                                  TLI->getPointerTy(),
   4709                                  DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, sdl,
   4710                                              TLI->getPointerTy()),
   4711                                  CfaArg);
   4712     SDValue FA = DAG.getNode(ISD::FRAMEADDR, sdl,
   4713                              TLI->getPointerTy(),
   4714                              DAG.getConstant(0, TLI->getPointerTy()));
   4715     setValue(&I, DAG.getNode(ISD::ADD, sdl, TLI->getPointerTy(),
   4716                              FA, Offset));
   4717     return 0;
   4718   }
   4719   case Intrinsic::eh_sjlj_callsite: {
   4720     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
   4721     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
   4722     assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
   4723     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
   4724 
   4725     MMI.setCurrentCallSite(CI->getZExtValue());
   4726     return 0;
   4727   }
   4728   case Intrinsic::eh_sjlj_functioncontext: {
   4729     // Get and store the index of the function context.
   4730     MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
   4731     AllocaInst *FnCtx =
   4732       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
   4733     int FI = FuncInfo.StaticAllocaMap[FnCtx];
   4734     MFI->setFunctionContextIndex(FI);
   4735     return 0;
   4736   }
   4737   case Intrinsic::eh_sjlj_setjmp: {
   4738     SDValue Ops[2];
   4739     Ops[0] = getRoot();
   4740     Ops[1] = getValue(I.getArgOperand(0));
   4741     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
   4742                              DAG.getVTList(MVT::i32, MVT::Other),
   4743                              Ops, 2);
   4744     setValue(&I, Op.getValue(0));
   4745     DAG.setRoot(Op.getValue(1));
   4746     return 0;
   4747   }
   4748   case Intrinsic::eh_sjlj_longjmp: {
   4749     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
   4750                             getRoot(), getValue(I.getArgOperand(0))));
   4751     return 0;
   4752   }
   4753 
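          // For example (illustrative IR), a call such as
          //   %r = call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %v, i32 %amt)
          // with a non-constant %amt is rewritten below to the vector-shift form
          // @llvm.x86.mmx.psll.w, widening the 32-bit shift amount to 64 bits
          // with the upper half zeroed, as the MMX/SSE2 shift instructions expect.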
   4754   case Intrinsic::x86_mmx_pslli_w:
   4755   case Intrinsic::x86_mmx_pslli_d:
   4756   case Intrinsic::x86_mmx_pslli_q:
   4757   case Intrinsic::x86_mmx_psrli_w:
   4758   case Intrinsic::x86_mmx_psrli_d:
   4759   case Intrinsic::x86_mmx_psrli_q:
   4760   case Intrinsic::x86_mmx_psrai_w:
   4761   case Intrinsic::x86_mmx_psrai_d: {
   4762     SDValue ShAmt = getValue(I.getArgOperand(1));
   4763     if (isa<ConstantSDNode>(ShAmt)) {
   4764       visitTargetIntrinsic(I, Intrinsic);
   4765       return 0;
   4766     }
   4767     unsigned NewIntrinsic = 0;
   4768     EVT ShAmtVT = MVT::v2i32;
   4769     switch (Intrinsic) {
   4770     case Intrinsic::x86_mmx_pslli_w:
   4771       NewIntrinsic = Intrinsic::x86_mmx_psll_w;
   4772       break;
   4773     case Intrinsic::x86_mmx_pslli_d:
   4774       NewIntrinsic = Intrinsic::x86_mmx_psll_d;
   4775       break;
   4776     case Intrinsic::x86_mmx_pslli_q:
   4777       NewIntrinsic = Intrinsic::x86_mmx_psll_q;
   4778       break;
   4779     case Intrinsic::x86_mmx_psrli_w:
   4780       NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
   4781       break;
   4782     case Intrinsic::x86_mmx_psrli_d:
   4783       NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
   4784       break;
   4785     case Intrinsic::x86_mmx_psrli_q:
   4786       NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
   4787       break;
   4788     case Intrinsic::x86_mmx_psrai_w:
   4789       NewIntrinsic = Intrinsic::x86_mmx_psra_w;
   4790       break;
   4791     case Intrinsic::x86_mmx_psrai_d:
   4792       NewIntrinsic = Intrinsic::x86_mmx_psra_d;
   4793       break;
   4794     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
   4795     }
   4796 
   4797     // The vector shift intrinsics with scalar shift amounts use 32-bit
   4798     // values, but the SSE2/MMX shift instructions read 64 bits. Set the
   4799     // upper 32 bits to zero.
   4800     // We must do this early because v2i32 is not a legal type.
   4801     SDValue ShOps[2];
   4802     ShOps[0] = ShAmt;
   4803     ShOps[1] = DAG.getConstant(0, MVT::i32);
   4804     ShAmt = DAG.getNode(ISD::BUILD_VECTOR, sdl, ShAmtVT, &ShOps[0], 2);
   4805     EVT DestVT = TLI->getValueType(I.getType());
   4806     ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
   4807     Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
   4808                        DAG.getConstant(NewIntrinsic, MVT::i32),
   4809                        getValue(I.getArgOperand(0)), ShAmt);
   4810     setValue(&I, Res);
   4811     return 0;
   4812   }
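          // For example (illustrative), @llvm.x86.avx.vinsertf128.ps.256 with an
          // immediate of 1 becomes an ISD::INSERT_SUBVECTOR at element index 4;
          // only the low bit of the immediate is honored, as the code below shows.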
   4813   case Intrinsic::x86_avx_vinsertf128_pd_256:
   4814   case Intrinsic::x86_avx_vinsertf128_ps_256:
   4815   case Intrinsic::x86_avx_vinsertf128_si_256:
   4816   case Intrinsic::x86_avx2_vinserti128: {
   4817     EVT DestVT = TLI->getValueType(I.getType());
   4818     EVT ElVT = TLI->getValueType(I.getArgOperand(1)->getType());
   4819     uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue() & 1) *
   4820                    ElVT.getVectorNumElements();
   4821     Res = DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, DestVT,
   4822                       getValue(I.getArgOperand(0)),
   4823                       getValue(I.getArgOperand(1)),
   4824                       DAG.getConstant(Idx, TLI->getVectorIdxTy()));
   4825     setValue(&I, Res);
   4826     return 0;
   4827   }
   4828   case Intrinsic::x86_avx_vextractf128_pd_256:
   4829   case Intrinsic::x86_avx_vextractf128_ps_256:
   4830   case Intrinsic::x86_avx_vextractf128_si_256:
   4831   case Intrinsic::x86_avx2_vextracti128: {
   4832     EVT DestVT = TLI->getValueType(I.getType());
   4833     uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(1))->getZExtValue() & 1) *
   4834                    DestVT.getVectorNumElements();
   4835     Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, DestVT,
   4836                       getValue(I.getArgOperand(0)),
   4837                       DAG.getConstant(Idx, TLI->getVectorIdxTy()));
   4838     setValue(&I, Res);
   4839     return 0;
   4840   }
   4841   case Intrinsic::convertff:
   4842   case Intrinsic::convertfsi:
   4843   case Intrinsic::convertfui:
   4844   case Intrinsic::convertsif:
   4845   case Intrinsic::convertuif:
   4846   case Intrinsic::convertss:
   4847   case Intrinsic::convertsu:
   4848   case Intrinsic::convertus:
   4849   case Intrinsic::convertuu: {
   4850     ISD::CvtCode Code = ISD::CVT_INVALID;
   4851     switch (Intrinsic) {
   4852     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
   4853     case Intrinsic::convertff:  Code = ISD::CVT_FF; break;
   4854     case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
   4855     case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
   4856     case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
   4857     case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
   4858     case Intrinsic::convertss:  Code = ISD::CVT_SS; break;
   4859     case Intrinsic::convertsu:  Code = ISD::CVT_SU; break;
   4860     case Intrinsic::convertus:  Code = ISD::CVT_US; break;
   4861     case Intrinsic::convertuu:  Code = ISD::CVT_UU; break;
   4862     }
   4863     EVT DestVT = TLI->getValueType(I.getType());
   4864     const Value *Op1 = I.getArgOperand(0);
   4865     Res = DAG.getConvertRndSat(DestVT, sdl, getValue(Op1),
   4866                                DAG.getValueType(DestVT),
   4867                                DAG.getValueType(getValue(Op1).getValueType()),
   4868                                getValue(I.getArgOperand(1)),
   4869                                getValue(I.getArgOperand(2)),
   4870                                Code);
   4871     setValue(&I, Res);
   4872     return 0;
   4873   }
   4874   case Intrinsic::powi:
   4875     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
   4876                             getValue(I.getArgOperand(1)), DAG));
   4877     return 0;
   4878   case Intrinsic::log:
   4879     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
   4880     return 0;
   4881   case Intrinsic::log2:
   4882     setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
   4883     return 0;
   4884   case Intrinsic::log10:
   4885     setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
   4886     return 0;
   4887   case Intrinsic::exp:
   4888     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
   4889     return 0;
   4890   case Intrinsic::exp2:
   4891     setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
   4892     return 0;
   4893   case Intrinsic::pow:
   4894     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
   4895                            getValue(I.getArgOperand(1)), DAG, *TLI));
   4896     return 0;
   4897   case Intrinsic::sqrt:
   4898   case Intrinsic::fabs:
   4899   case Intrinsic::sin:
   4900   case Intrinsic::cos:
   4901   case Intrinsic::floor:
   4902   case Intrinsic::ceil:
   4903   case Intrinsic::trunc:
   4904   case Intrinsic::rint:
   4905   case Intrinsic::nearbyint: {
   4906     unsigned Opcode;
   4907     switch (Intrinsic) {
   4908     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
   4909     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
   4910     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
   4911     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
   4912     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
   4913     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
   4914     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
   4915     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
   4916     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
   4917     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
   4918     }
   4919 
   4920     setValue(&I, DAG.getNode(Opcode, sdl,
   4921                              getValue(I.getArgOperand(0)).getValueType(),
   4922                              getValue(I.getArgOperand(0))));
   4923     return 0;
   4924   }
   4925   case Intrinsic::fma:
   4926     setValue(&I, DAG.getNode(ISD::FMA, sdl,
   4927                              getValue(I.getArgOperand(0)).getValueType(),
   4928                              getValue(I.getArgOperand(0)),
   4929                              getValue(I.getArgOperand(1)),
   4930                              getValue(I.getArgOperand(2))));
   4931     return 0;
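          // llvm.fmuladd becomes a fused ISD::FMA when FP-operation fusion is
          // allowed and the target reports FMA as faster than FMUL+FADD;
          // otherwise it is split into an FMUL followed by an FADD.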
   4932   case Intrinsic::fmuladd: {
   4933     EVT VT = TLI->getValueType(I.getType());
   4934     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
   4935         TLI->isFMAFasterThanFMulAndFAdd(VT)) {
   4936       setValue(&I, DAG.getNode(ISD::FMA, sdl,
   4937                                getValue(I.getArgOperand(0)).getValueType(),
   4938                                getValue(I.getArgOperand(0)),
   4939                                getValue(I.getArgOperand(1)),
   4940                                getValue(I.getArgOperand(2))));
   4941     } else {
   4942       SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
   4943                                 getValue(I.getArgOperand(0)).getValueType(),
   4944                                 getValue(I.getArgOperand(0)),
   4945                                 getValue(I.getArgOperand(1)));
   4946       SDValue Add = DAG.getNode(ISD::FADD, sdl,
   4947                                 getValue(I.getArgOperand(0)).getValueType(),
   4948                                 Mul,
   4949                                 getValue(I.getArgOperand(2)));
   4950       setValue(&I, Add);
   4951     }
   4952     return 0;
   4953   }
   4954   case Intrinsic::convert_to_fp16:
   4955     setValue(&I, DAG.getNode(ISD::FP32_TO_FP16, sdl,
   4956                              MVT::i16, getValue(I.getArgOperand(0))));
   4957     return 0;
   4958   case Intrinsic::convert_from_fp16:
   4959     setValue(&I, DAG.getNode(ISD::FP16_TO_FP32, sdl,
   4960                              MVT::f32, getValue(I.getArgOperand(0))));
   4961     return 0;
   4962   case Intrinsic::pcmarker: {
   4963     SDValue Tmp = getValue(I.getArgOperand(0));
   4964     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
   4965     return 0;
   4966   }
   4967   case Intrinsic::readcyclecounter: {
   4968     SDValue Op = getRoot();
   4969     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
   4970                       DAG.getVTList(MVT::i64, MVT::Other),
   4971                       &Op, 1);
   4972     setValue(&I, Res);
   4973     DAG.setRoot(Res.getValue(1));
   4974     return 0;
   4975   }
   4976   case Intrinsic::bswap:
   4977     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
   4978                              getValue(I.getArgOperand(0)).getValueType(),
   4979                              getValue(I.getArgOperand(0))));
   4980     return 0;
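          // The i1 second operand of llvm.cttz/llvm.ctlz (the is_zero_undef flag)
          // selects between the plain node and the *_ZERO_UNDEF variant below.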
   4981   case Intrinsic::cttz: {
   4982     SDValue Arg = getValue(I.getArgOperand(0));
   4983     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
   4984     EVT Ty = Arg.getValueType();
   4985     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
   4986                              sdl, Ty, Arg));
   4987     return 0;
   4988   }
   4989   case Intrinsic::ctlz: {
   4990     SDValue Arg = getValue(I.getArgOperand(0));
   4991     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
   4992     EVT Ty = Arg.getValueType();
   4993     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
   4994                              sdl, Ty, Arg));
   4995     return 0;
   4996   }
   4997   case Intrinsic::ctpop: {
   4998     SDValue Arg = getValue(I.getArgOperand(0));
   4999     EVT Ty = Arg.getValueType();
   5000     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
   5001     return 0;
   5002   }
   5003   case Intrinsic::stacksave: {
   5004     SDValue Op = getRoot();
   5005     Res = DAG.getNode(ISD::STACKSAVE, sdl,
   5006                       DAG.getVTList(TLI->getPointerTy(), MVT::Other), &Op, 1);
   5007     setValue(&I, Res);
   5008     DAG.setRoot(Res.getValue(1));
   5009     return 0;
   5010   }
   5011   case Intrinsic::stackrestore: {
   5012     Res = getValue(I.getArgOperand(0));
   5013     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
   5014     return 0;
   5015   }
   5016   case Intrinsic::stackprotector: {
   5017     // Emit code into the DAG to store the stack guard onto the stack.
   5018     MachineFunction &MF = DAG.getMachineFunction();
   5019     MachineFrameInfo *MFI = MF.getFrameInfo();
   5020     EVT PtrTy = TLI->getPointerTy();
   5021 
   5022     SDValue Src = getValue(I.getArgOperand(0));   // The guard's value.
   5023     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
   5024 
   5025     int FI = FuncInfo.StaticAllocaMap[Slot];
   5026     MFI->setStackProtectorIndex(FI);
   5027 
   5028     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
   5029 
   5030     // Store the stack protector onto the stack.
   5031     Res = DAG.getStore(getRoot(), sdl, Src, FIN,
   5032                        MachinePointerInfo::getFixedStack(FI),
   5033                        true, false, 0);
   5034     setValue(&I, Res);
   5035     DAG.setRoot(Res);
   5036     return 0;
   5037   }
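          // For example (illustrative IR),
          //   %sz = call i64 @llvm.objectsize.i64(i8* %p, i1 false)
          // surviving to this point folds to -1 (or to 0 if the i1 'min' argument
          // is true), since the size was never determined.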
   5038   case Intrinsic::objectsize: {
   5039     // If we don't know by now, we're never going to know.
   5040     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
   5041 
   5042     assert(CI && "Non-constant type in __builtin_object_size?");
   5043 
   5044     SDValue Arg = getValue(I.getCalledValue());
   5045     EVT Ty = Arg.getValueType();
   5046 
   5047     if (CI->isZero())
   5048       Res = DAG.getConstant(-1ULL, Ty);
   5049     else
   5050       Res = DAG.getConstant(0, Ty);
   5051 
   5052     setValue(&I, Res);
   5053     return 0;
   5054   }
   5055   case Intrinsic::annotation:
   5056   case Intrinsic::ptr_annotation:
   5057     // Drop the intrinsic, but forward the value
   5058     setValue(&I, getValue(I.getOperand(0)));
   5059     return 0;
   5060   case Intrinsic::var_annotation:
   5061     // Discard annotate attributes
   5062     return 0;
   5063 
   5064   case Intrinsic::init_trampoline: {
   5065     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
   5066 
   5067     SDValue Ops[6];
   5068     Ops[0] = getRoot();
   5069     Ops[1] = getValue(I.getArgOperand(0));
   5070     Ops[2] = getValue(I.getArgOperand(1));
   5071     Ops[3] = getValue(I.getArgOperand(2));
   5072     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
   5073     Ops[5] = DAG.getSrcValue(F);
   5074 
   5075     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops, 6);
   5076 
   5077     DAG.setRoot(Res);
   5078     return 0;
   5079   }
   5080   case Intrinsic::adjust_trampoline: {
   5081     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
   5082                              TLI->getPointerTy(),
   5083                              getValue(I.getArgOperand(0))));
   5084     return 0;
   5085   }
   5086   case Intrinsic::gcroot:
   5087     if (GFI) {
   5088       const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
   5089       const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
   5090 
   5091       FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
   5092       GFI->addStackRoot(FI->getIndex(), TypeMap);
   5093     }
   5094     return 0;
   5095   case Intrinsic::gcread:
   5096   case Intrinsic::gcwrite:
   5097     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
   5098   case Intrinsic::flt_rounds:
   5099     setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
   5100     return 0;
   5101 
   5102   case Intrinsic::expect: {
   5103     // Just replace __builtin_expect(exp, c) with EXP.
   5104     setValue(&I, getValue(I.getArgOperand(0)));
   5105     return 0;
   5106   }
   5107 
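          // If a trap function has been configured in TargetOptions, the trap is
          // lowered as a call to that symbol; otherwise it becomes an ISD::TRAP or
          // ISD::DEBUGTRAP node.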
   5108   case Intrinsic::debugtrap:
   5109   case Intrinsic::trap: {
   5110     StringRef TrapFuncName = TM.Options.getTrapFunctionName();
   5111     if (TrapFuncName.empty()) {
   5112       ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
   5113         ISD::TRAP : ISD::DEBUGTRAP;
   5114       DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
   5115       return 0;
   5116     }
   5117     TargetLowering::ArgListTy Args;
   5118     TargetLowering::
   5119     CallLoweringInfo CLI(getRoot(), I.getType(),
   5120                  false, false, false, false, 0, CallingConv::C,
   5121                  /*isTailCall=*/false,
   5122                  /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
   5123                  DAG.getExternalSymbol(TrapFuncName.data(),
   5124                                        TLI->getPointerTy()),
   5125                  Args, DAG, sdl);
   5126     std::pair<SDValue, SDValue> Result = TLI->LowerCallTo(CLI);
   5127     DAG.setRoot(Result.second);
   5128     return 0;
   5129   }
   5130 
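          // For example (illustrative IR),
          //   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
          // maps below to an ISD::UADDO node whose second result is the i1
          // overflow bit.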
   5131   case Intrinsic::uadd_with_overflow:
   5132   case Intrinsic::sadd_with_overflow:
   5133   case Intrinsic::usub_with_overflow:
   5134   case Intrinsic::ssub_with_overflow:
   5135   case Intrinsic::umul_with_overflow:
   5136   case Intrinsic::smul_with_overflow: {
   5137     ISD::NodeType Op;
   5138     switch (Intrinsic) {
   5139     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
   5140     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
   5141     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
   5142     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
   5143     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
   5144     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
   5145     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
   5146     }
   5147     SDValue Op1 = getValue(I.getArgOperand(0));
   5148     SDValue Op2 = getValue(I.getArgOperand(1));
   5149 
   5150     SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
   5151     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
   5152     return 0;
   5153   }
   5154   case Intrinsic::prefetch: {
   5155     SDValue Ops[5];
   5156     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
   5157     Ops[0] = getRoot();
   5158     Ops[1] = getValue(I.getArgOperand(0));
   5159     Ops[2] = getValue(I.getArgOperand(1));
   5160     Ops[3] = getValue(I.getArgOperand(2));
   5161     Ops[4] = getValue(I.getArgOperand(3));
   5162     DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
   5163                                         DAG.getVTList(MVT::Other),
   5164                                         &Ops[0], 5,
   5165                                         EVT::getIntegerVT(*Context, 8),
   5166                                         MachinePointerInfo(I.getArgOperand(0)),
   5167                                         0, /* align */
   5168                                         false, /* volatile */
   5169                                         rw==0, /* read */
   5170                                         rw==1)); /* write */
   5171     return 0;
   5172   }
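          // Lifetime markers become LIFETIME_START/LIFETIME_END nodes on the frame
          // index of each underlying static alloca; they are discarded at -O0 or
          // when the pointer cannot be traced back to an alloca.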
   5173   case Intrinsic::lifetime_start:
   5174   case Intrinsic::lifetime_end: {
   5175     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
   5176     // Stack coloring is not enabled at -O0; discard region information.
   5177     if (TM.getOptLevel() == CodeGenOpt::None)
   5178       return 0;
   5179 
   5180     SmallVector<Value *, 4> Allocas;
   5181     GetUnderlyingObjects(I.getArgOperand(1), Allocas, TD);
   5182 
   5183     for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
   5184            E = Allocas.end(); Object != E; ++Object) {
   5185       AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
   5186 
   5187       // Could not find an Alloca.
   5188       if (!LifetimeObject)
   5189         continue;
   5190 
   5191       int FI = FuncInfo.StaticAllocaMap[LifetimeObject];
   5192 
   5193       SDValue Ops[2];
   5194       Ops[0] = getRoot();
   5195       Ops[1] = DAG.getFrameIndex(FI, TLI->getPointerTy(), true);
   5196       unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
   5197 
   5198       Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops, 2);
   5199       DAG.setRoot(Res);
   5200     }
   5201     return 0;
   5202   }
   5203   case Intrinsic::invariant_start:
   5204     // Discard region information.
   5205     setValue(&I, DAG.getUNDEF(TLI->getPointerTy()));
   5206     return 0;
   5207   case Intrinsic::invariant_end:
   5208     // Discard region information.
   5209     return 0;
   5210   case Intrinsic::donothing:
   5211     // ignore
   5212     return 0;
   5213   }
   5214 }
   5215 
   5216 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
   5217                                       bool isTailCall,
   5218                                       MachineBasicBlock *LandingPad) {
   5219   PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
   5220   FunctionType *FTy = cast<FunctionType>(PT->getElementType());
   5221   Type *RetTy = FTy->getReturnType();
   5222   MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
   5223   MCSymbol *BeginLabel = 0;
   5224 
   5225   TargetLowering::ArgListTy Args;
   5226   TargetLowering::ArgListEntry Entry;
   5227   Args.reserve(CS.arg_size());
   5228 
   5229   // Check whether the function can return without sret-demotion.
   5230   SmallVector<ISD::OutputArg, 4> Outs;
   5231   const TargetLowering *TLI = TM.getTargetLowering();
   5232   GetReturnInfo(RetTy, CS.getAttributes(), Outs, *TLI);
   5233 
   5234   bool CanLowerReturn = TLI->CanLowerReturn(CS.getCallingConv(),
   5235                                             DAG.getMachineFunction(),
   5236                                             FTy->isVarArg(), Outs,
   5237                                             FTy->getContext());
   5238 
   5239   SDValue DemoteStackSlot;
   5240   int DemoteStackIdx = -100;
   5241 
   5242   if (!CanLowerReturn) {
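          // If the return value cannot be lowered in registers, demote it: pass a
          // hidden sret pointer to a stack temporary as the first argument and
          // treat the call as returning void; the result is reloaded from that
          // slot after the call (see below).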
   5243     uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(
   5244                       FTy->getReturnType());
   5245     unsigned Align  = TLI->getDataLayout()->getPrefTypeAlignment(
   5246                       FTy->getReturnType());
   5247     MachineFunction &MF = DAG.getMachineFunction();
   5248     DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
   5249     Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
   5250 
   5251     DemoteStackSlot = DAG.getFrameIndex(DemoteStackIdx, TLI->getPointerTy());
   5252     Entry.Node = DemoteStackSlot;
   5253     Entry.Ty = StackSlotPtrType;
   5254     Entry.isSExt = false;
   5255     Entry.isZExt = false;
   5256     Entry.isInReg = false;
   5257     Entry.isSRet = true;
   5258     Entry.isNest = false;
   5259     Entry.isByVal = false;
   5260     Entry.isReturned = false;
   5261     Entry.Alignment = Align;
   5262     Args.push_back(Entry);
   5263     RetTy = Type::getVoidTy(FTy->getContext());
   5264   }
   5265 
   5266   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
   5267        i != e; ++i) {
   5268     const Value *V = *i;
   5269 
   5270     // Skip empty types
   5271     if (V->getType()->isEmptyTy())
   5272       continue;
   5273 
   5274     SDValue ArgNode = getValue(V);
   5275     Entry.Node = ArgNode; Entry.Ty = V->getType();
   5276 
   5277     unsigned attrInd = i - CS.arg_begin() + 1;
   5278     Entry.isSExt     = CS.paramHasAttr(attrInd, Attribute::SExt);
   5279     Entry.isZExt     = CS.paramHasAttr(attrInd, Attribute::ZExt);
   5280     Entry.isInReg    = CS.paramHasAttr(attrInd, Attribute::InReg);
   5281     Entry.isSRet     = CS.paramHasAttr(attrInd, Attribute::StructRet);
   5282     Entry.isNest     = CS.paramHasAttr(attrInd, Attribute::Nest);
   5283     Entry.isByVal    = CS.paramHasAttr(attrInd, Attribute::ByVal);
   5284     Entry.isReturned = CS.paramHasAttr(attrInd, Attribute::Returned);
   5285     Entry.Alignment  = CS.getParamAlignment(attrInd);
   5286     Args.push_back(Entry);
   5287   }
   5288 
   5289   if (LandingPad) {
   5290     // Insert a label before the invoke call to mark the try range.  This can be
   5291     // used to detect deletion of the invoke via the MachineModuleInfo.
   5292     BeginLabel = MMI.getContext().CreateTempSymbol();
   5293 
   5294     // For SjLj, keep track of which landing pads go with which invokes
   5295     // so as to maintain the ordering of pads in the LSDA.
   5296     unsigned CallSiteIndex = MMI.getCurrentCallSite();
   5297     if (CallSiteIndex) {
   5298       MMI.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
   5299       LPadToCallSiteMap[LandingPad].push_back(CallSiteIndex);
   5300 
   5301       // Now that the call site is handled, stop tracking it.
   5302       MMI.setCurrentCallSite(0);
   5303     }
   5304 
   5305     // Both PendingLoads and PendingExports must be flushed here;
   5306     // this call might not return.
   5307     (void)getRoot();
   5308     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
   5309   }
   5310 
   5311   // Check if target-independent constraints permit a tail call here.
   5312   // Target-dependent constraints are checked within TLI->LowerCallTo.
   5313   if (isTailCall && !isInTailCallPosition(CS, *TLI))
   5314     isTailCall = false;
   5315 
   5316   TargetLowering::
   5317   CallLoweringInfo CLI(getRoot(), RetTy, FTy, isTailCall, Callee, Args, DAG,
   5318                        getCurSDLoc(), CS);
   5319   std::pair<SDValue,SDValue> Result = TLI->LowerCallTo(CLI);
   5320   assert((isTailCall || Result.second.getNode()) &&
   5321          "Non-null chain expected with non-tail call!");
   5322   assert((Result.second.getNode() || !Result.first.getNode()) &&
   5323          "Null value expected with tail call!");
   5324   if (Result.first.getNode()) {
   5325     setValue(CS.getInstruction(), Result.first);
   5326   } else if (!CanLowerReturn && Result.second.getNode()) {
   5327     // The instruction result is the result of loading from the
   5328     // hidden sret parameter.
   5329     SmallVector<EVT, 1> PVTs;
   5330     Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
   5331 
   5332     ComputeValueVTs(*TLI, PtrRetTy, PVTs);
   5333     assert(PVTs.size() == 1 && "Pointers should fit in one register");
   5334     EVT PtrVT = PVTs[0];
   5335 
   5336     SmallVector<EVT, 4> RetTys;
   5337     SmallVector<uint64_t, 4> Offsets;
   5338     RetTy = FTy->getReturnType();
   5339     ComputeValueVTs(*TLI, RetTy, RetTys, &Offsets);
   5340 
   5341     unsigned NumValues = RetTys.size();
   5342     SmallVector<SDValue, 4> Values(NumValues);
   5343     SmallVector<SDValue, 4> Chains(NumValues);
   5344 
   5345     for (unsigned i = 0; i < NumValues; ++i) {
   5346       SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(), PtrVT,
   5347                                 DemoteStackSlot,
   5348                                 DAG.getConstant(Offsets[i], PtrVT));
   5349       SDValue L = DAG.getLoad(RetTys[i], getCurSDLoc(), Result.second, Add,
   5350                   MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]),
   5351                               false, false, false, 1);
   5352       Values[i] = L;
   5353       Chains[i] = L.getValue(1);
   5354     }
   5355 
   5356     SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
   5357                                 MVT::Other, &Chains[0], NumValues);
   5358     PendingLoads.push_back(Chain);
   5359 
   5360     setValue(CS.getInstruction(),
   5361              DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
   5362                          DAG.getVTList(&RetTys[0], RetTys.size()),
   5363                          &Values[0], Values.size()));
   5364   }
   5365 
   5366   if (!Result.second.getNode()) {
   5367     // As a special case, a null chain means that a tail call has been emitted and
   5368     // the DAG root is already updated.
   5369     HasTailCall = true;
   5370 
   5371     // Since there's no actual continuation from this block, nothing can be
   5372     // relying on us setting vregs for them.
   5373     PendingExports.clear();
   5374   } else {
   5375     DAG.setRoot(Result.second);
   5376   }
   5377 
   5378   if (LandingPad) {
   5379     // Insert a label at the end of the invoke call to mark the try range.  This
   5380     // can be used to detect deletion of the invoke via the MachineModuleInfo.
   5381     MCSymbol *EndLabel = MMI.getContext().CreateTempSymbol();
   5382     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
   5383 
   5384     // Inform MachineModuleInfo of range.
   5385     MMI.addInvoke(LandingPad, BeginLabel, EndLabel);
   5386   }
   5387 }
   5388 
   5389 /// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
   5390 /// value is equal or not-equal to zero.
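        /// (For example, a memcmp result that only feeds == 0 / != 0 checks.)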
   5391 static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
   5392   for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
   5393        UI != E; ++UI) {
   5394     if (const ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
   5395       if (IC->isEquality())
   5396         if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
   5397           if (C->isNullValue())
   5398             continue;
   5399     // Unknown instruction.
   5400     return false;
   5401   }
   5402   return true;
   5403 }
   5404 
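        /// getMemCmpLoad - Emit (or constant fold) a single wide load of the memory
        /// pointed to by PtrVal, for use by the optimized memcmp expansion below.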
   5405 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
   5406                              Type *LoadTy,
   5407                              SelectionDAGBuilder &Builder) {
   5408 
   5409   // Check to see if this load can be trivially constant folded, e.g. if the
   5410   // input is from a string literal.
   5411   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
   5412     // Cast pointer to the type we really want to load.
   5413     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
   5414                                          PointerType::getUnqual(LoadTy));
   5415 
   5416     if (const Constant *LoadCst =
   5417           ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
   5418                                        Builder.TD))
   5419       return Builder.getValue(LoadCst);
   5420   }
   5421 
   5422   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
   5423   // still constant memory, the input chain can be the entry node.
   5424   SDValue Root;
   5425   bool ConstantMemory = false;
   5426 
   5427   // Do not serialize (non-volatile) loads of constant memory with anything.
   5428   if (Builder.AA->pointsToConstantMemory(PtrVal)) {
   5429     Root = Builder.DAG.getEntryNode();
   5430     ConstantMemory = true;
   5431   } else {
   5432     // Do not serialize non-volatile loads against each other.
   5433     Root = Builder.DAG.getRoot();
   5434   }
   5435 
   5436   SDValue Ptr = Builder.getValue(PtrVal);
   5437   SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
   5438                                         Ptr, MachinePointerInfo(PtrVal),
   5439                                         false /*volatile*/,
   5440                                         false /*nontemporal*/,
   5441                                         false /*isinvariant*/, 1 /* align=1 */);
   5442 
   5443   if (!ConstantMemory)
   5444     Builder.PendingLoads.push_back(LoadVal.getValue(1));
   5445   return LoadVal;
   5446 }
   5447 
   5448 
   5449 /// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form.
   5450 /// If so, return true and lower it; otherwise return false and it will be
   5451 /// lowered like a normal call.
   5452 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
   5453   // Verify that the prototype makes sense.  int memcmp(void*,void*,size_t)
   5454   if (I.getNumArgOperands() != 3)
   5455     return false;
   5456 
   5457   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
   5458   if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() ||
   5459       !I.getArgOperand(2)->getType()->isIntegerTy() ||
   5460       !I.getType()->isIntegerTy())
   5461     return false;
   5462 
   5463   const ConstantInt *Size = dyn_cast<ConstantInt>(I.getArgOperand(2));
   5464 
   5465   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
   5466   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
   5467   if (Size && IsOnlyUsedInZeroEqualityComparison(&I)) {
   5468     bool ActuallyDoIt = true;
   5469     MVT LoadVT;
   5470     Type *LoadTy;
   5471     switch (Size->getZExtValue()) {
   5472     default:
   5473       LoadVT = MVT::Other;
   5474       LoadTy = 0;
   5475       ActuallyDoIt = false;
   5476       break;
   5477     case 2:
   5478       LoadVT = MVT::i16;
   5479       LoadTy = Type::getInt16Ty(Size->getContext());
   5480       break;
   5481     case 4:
   5482       LoadVT = MVT::i32;
   5483       LoadTy = Type::getInt32Ty(Size->getContext());
   5484       break;
   5485     case 8:
   5486       LoadVT = MVT::i64;
   5487       LoadTy = Type::getInt64Ty(Size->getContext());
   5488       break;
   5489         /*
   5490     case 16:
   5491       LoadVT = MVT::v4i32;
   5492       LoadTy = Type::getInt32Ty(Size->getContext());
   5493       LoadTy = VectorType::get(LoadTy, 4);
   5494       break;
   5495          */
   5496     }
   5497 
   5498     // This turns into unaligned loads.  We only do this if the target natively
   5499     // supports the MVT we'll be loading or if it is small enough (<= 4) that
   5500     // we'll only produce a small number of byte loads.
   5501 
   5502     // Require that we can find a legal MVT, and only do this if the target
   5503     // supports unaligned loads of that type.  Expanding into byte loads would
   5504     // bloat the code.
   5505     const TargetLowering *TLI = TM.getTargetLowering();
   5506     if (ActuallyDoIt && Size->getZExtValue() > 4) {
   5507       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
   5508       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
   5509       if (!TLI->isTypeLegal(LoadVT) || !TLI->allowsUnalignedMemoryAccesses(LoadVT))
   5510         ActuallyDoIt = false;
   5511     }
   5512 
   5513     if (ActuallyDoIt) {
   5514       SDValue LHSVal = getMemCmpLoad(LHS, LoadVT, LoadTy, *this);
   5515       SDValue RHSVal = getMemCmpLoad(RHS, LoadVT, LoadTy, *this);
   5516 
   5517       SDValue Res = DAG.getSetCC(getCurSDLoc(), MVT::i1, LHSVal, RHSVal,
   5518                                  ISD::SETNE);
   5519       EVT CallVT = TLI->getValueType(I.getType(), true);
   5520       setValue(&I, DAG.getZExtOrTrunc(Res, getCurSDLoc(), CallVT));
   5521       return true;
   5522     }
   5523   }
   5524 
   5525 
   5526   return false;
   5527 }
   5528 
   5529 /// visitUnaryFloatCall - If a call instruction is a unary floating-point
   5530 /// operation (as expected), translate it to an SDNode with the specified opcode
   5531 /// and return true.
   5532 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
   5533                                               unsigned Opcode) {
   5534   // Sanity check that it really is a unary floating-point call.
   5535   if (I.getNumArgOperands() != 1 ||
   5536       !I.getArgOperand(0)->getType()->isFloatingPointTy() ||
   5537       I.getType() != I.getArgOperand(0)->getType() ||
   5538       !I.onlyReadsMemory())
   5539     return false;
   5540 
   5541   SDValue Tmp = getValue(I.getArgOperand(0));
   5542   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
   5543   return true;
   5544 }
   5545 
   5546 void SelectionDAGBuilder::visitCall(const CallInst &I) {
   5547   // Handle inline assembly differently.
   5548   if (isa<InlineAsm>(I.getCalledValue())) {
   5549     visitInlineAsm(&I);
   5550     return;
   5551   }
   5552 
   5553   MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
   5554   ComputeUsesVAFloatArgument(I, &MMI);
   5555 
   5556   const char *RenameFn = 0;
   5557   if (Function *F = I.getCalledFunction()) {
   5558     if (F->isDeclaration()) {
   5559       if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
   5560         if (unsigned IID = II->getIntrinsicID(F)) {
   5561           RenameFn = visitIntrinsicCall(I, IID);
   5562           if (!RenameFn)
   5563             return;
   5564         }
   5565       }
   5566       if (unsigned IID = F->getIntrinsicID()) {
   5567         RenameFn = visitIntrinsicCall(I, IID);
   5568         if (!RenameFn)
   5569           return;
   5570       }
   5571     }
   5572 
   5573     // Check for well-known libc/libm calls.  If the function is internal, it
   5574     // can't be a library call.
   5575     LibFunc::Func Func;
   5576     if (!F->hasLocalLinkage() && F->hasName() &&
   5577         LibInfo->getLibFunc(F->getName(), Func) &&
   5578         LibInfo->hasOptimizedCodeGen(Func)) {
   5579       switch (Func) {
   5580       default: break;
   5581       case LibFunc::copysign:
   5582       case LibFunc::copysignf:
   5583       case LibFunc::copysignl:
   5584         if (I.getNumArgOperands() == 2 &&   // Basic sanity checks.
   5585             I.getArgOperand(0)->getType()->isFloatingPointTy() &&
   5586             I.getType() == I.getArgOperand(0)->getType() &&
   5587             I.getType() == I.getArgOperand(1)->getType() &&
   5588             I.onlyReadsMemory()) {
   5589           SDValue LHS = getValue(I.getArgOperand(0));
   5590           SDValue RHS = getValue(I.getArgOperand(1));
   5591           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
   5592                                    LHS.getValueType(), LHS, RHS));
   5593           return;
   5594         }
   5595         break;
   5596       case LibFunc::fabs:
   5597       case LibFunc::fabsf:
   5598       case LibFunc::fabsl:
   5599         if (visitUnaryFloatCall(I, ISD::FABS))
   5600           return;
   5601         break;
   5602       case LibFunc::sin:
   5603       case LibFunc::sinf:
   5604       case LibFunc::sinl:
   5605         if (visitUnaryFloatCall(I, ISD::FSIN))
   5606           return;
   5607         break;
   5608       case LibFunc::cos:
   5609       case LibFunc::cosf:
   5610       case LibFunc::cosl:
   5611         if (visitUnaryFloatCall(I, ISD::FCOS))
   5612           return;
   5613         break;
   5614       case LibFunc::sqrt:
   5615       case LibFunc::sqrtf:
   5616       case LibFunc::sqrtl:
   5617       case LibFunc::sqrt_finite:
   5618       case LibFunc::sqrtf_finite:
   5619       case LibFunc::sqrtl_finite:
   5620         if (visitUnaryFloatCall(I, ISD::FSQRT))
   5621           return;
   5622         break;
   5623       case LibFunc::floor:
   5624       case LibFunc::floorf:
   5625       case LibFunc::floorl:
   5626         if (visitUnaryFloatCall(I, ISD::FFLOOR))
   5627           return;
   5628         break;
   5629       case LibFunc::nearbyint:
   5630       case LibFunc::nearbyintf:
   5631       case LibFunc::nearbyintl:
   5632         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
   5633           return;
   5634         break;
   5635       case LibFunc::ceil:
   5636       case LibFunc::ceilf:
   5637       case LibFunc::ceill:
   5638         if (visitUnaryFloatCall(I, ISD::FCEIL))
   5639           return;
   5640         break;
   5641       case LibFunc::rint:
   5642       case LibFunc::rintf:
   5643       case LibFunc::rintl:
   5644         if (visitUnaryFloatCall(I, ISD::FRINT))
   5645           return;
   5646         break;
   5647       case LibFunc::trunc:
   5648       case LibFunc::truncf:
   5649       case LibFunc::truncl:
   5650         if (visitUnaryFloatCall(I, ISD::FTRUNC))
   5651           return;
   5652         break;
   5653       case LibFunc::log2:
   5654       case LibFunc::log2f:
   5655       case LibFunc::log2l:
   5656         if (visitUnaryFloatCall(I, ISD::FLOG2))
   5657           return;
   5658         break;
   5659       case LibFunc::exp2:
   5660       case LibFunc::exp2f:
   5661       case LibFunc::exp2l:
   5662         if (visitUnaryFloatCall(I, ISD::FEXP2))
   5663           return;
   5664         break;
   5665       case LibFunc::memcmp:
   5666         if (visitMemCmpCall(I))
   5667           return;
   5668         break;
   5669       }
   5670     }
   5671   }
   5672 
   5673   SDValue Callee;
   5674   if (!RenameFn)
   5675     Callee = getValue(I.getCalledValue());
   5676   else
   5677     Callee = DAG.getExternalSymbol(RenameFn,
   5678                                    TM.getTargetLowering()->getPointerTy());
   5679 
   5680   // Check if we can potentially perform a tail call. More detailed checking
   5681   // will be done within LowerCallTo, once more is known about the call.
   5682   LowerCallTo(&I, Callee, I.isTailCall());
   5683 }
   5684 
   5685 namespace {
   5686 
   5687 /// AsmOperandInfo - This contains information for each constraint that we are
   5688 /// lowering.
   5689 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
   5690 public:
   5691   /// CallOperand - If this is the result output operand or a clobber
   5692   /// this is null, otherwise it is the incoming operand to the CallInst.
   5693   /// This gets modified as the asm is processed.
   5694   SDValue CallOperand;
   5695 
   5696   /// AssignedRegs - If this is a register or register class operand, this
   5697   /// contains the set of registers corresponding to the operand.
   5698   RegsForValue AssignedRegs;
   5699 
   5700   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
   5701     : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
   5702   }
   5703 
   5704   /// getCallOperandValEVT - Return the EVT of the Value* that this operand
   5705   /// corresponds to.  If there is no Value* for this operand, it returns
   5706   /// MVT::Other.
   5707   EVT getCallOperandValEVT(LLVMContext &Context,
   5708                            const TargetLowering &TLI,
   5709                            const DataLayout *TD) const {
   5710     if (CallOperandVal == 0) return MVT::Other;
   5711 
   5712     if (isa<BasicBlock>(CallOperandVal))
   5713       return TLI.getPointerTy();
   5714 
   5715     llvm::Type *OpTy = CallOperandVal->getType();
   5716 
   5717     // FIXME: code duplicated from TargetLowering::ParseConstraints().
   5718     // If this is an indirect operand, the operand is a pointer to the
   5719     // accessed type.
   5720     if (isIndirect) {
   5721       llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
   5722       if (!PtrTy)
   5723         report_fatal_error("Indirect operand for inline asm not a pointer!");
   5724       OpTy = PtrTy->getElementType();
   5725     }
   5726 
   5727     // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
   5728     if (StructType *STy = dyn_cast<StructType>(OpTy))
   5729       if (STy->getNumElements() == 1)
   5730         OpTy = STy->getElementType(0);
   5731 
   5732     // If OpTy is not a single value, it may be a struct/union that we
   5733     // can tile with integers.
   5734     if (!OpTy->isSingleValueType() && OpTy->isSized()) {
   5735       unsigned BitSize = TD->getTypeSizeInBits(OpTy);
   5736       switch (BitSize) {
   5737       default: break;
   5738       case 1:
   5739       case 8:
   5740       case 16:
   5741       case 32:
   5742       case 64:
   5743       case 128:
   5744         OpTy = IntegerType::get(Context, BitSize);
   5745         break;
   5746       }
   5747     }
   5748 
   5749     return TLI.getValueType(OpTy, true);
   5750   }
   5751 };
   5752 
   5753 typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
   5754 
   5755 } // end anonymous namespace
   5756 
   5757 /// GetRegistersForValue - Assign registers (virtual or physical) for the
   5758 /// specified operand.  We prefer to assign virtual registers, to allow the
   5759 /// register allocator to handle the assignment process.  However, if the asm
   5760 /// uses features that we can't model on machineinstrs, we have SDISel do the
   5761 /// allocation.  This produces generally horrible, but correct, code.
   5762 ///
   5763 ///   OpInfo describes the operand.
   5764 ///
   5765 static void GetRegistersForValue(SelectionDAG &DAG,
   5766                                  const TargetLowering &TLI,
   5767                                  SDLoc DL,
   5768                                  SDISelAsmOperandInfo &OpInfo) {
   5769   LLVMContext &Context = *DAG.getContext();
   5770 
   5771   MachineFunction &MF = DAG.getMachineFunction();
   5772   SmallVector<unsigned, 4> Regs;
   5773 
   5774   // If this is a constraint for a single physreg, or a constraint for a
   5775   // register class, find it.
   5776   std::pair<unsigned, const TargetRegisterClass*> PhysReg =
   5777     TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
   5778                                      OpInfo.ConstraintVT);
   5779 
   5780   unsigned NumRegs = 1;
   5781   if (OpInfo.ConstraintVT != MVT::Other) {
   5782     // If this is an FP input in an integer register (or vice versa), insert a
   5783     // bitcast of the input value.  More generally, handle any case where the
   5784     // input value disagrees with the register class we plan to stick it in.
   5785     if (OpInfo.Type == InlineAsm::isInput &&
   5786         PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
   5787       // Try to convert to the first EVT that the reg class contains.  If the
   5788       // types are identical size, use a bitcast to convert (e.g. two differing
   5789       // vector types).
   5790       MVT RegVT = *PhysReg.second->vt_begin();
   5791       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
   5792         OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
   5793                                          RegVT, OpInfo.CallOperand);
   5794         OpInfo.ConstraintVT = RegVT;
   5795       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
   5796         // If the input is a FP value and we want it in FP registers, do a
   5797         // bitcast to the corresponding integer type.  This turns an f64 value
   5798         // into i64, which can be passed with two i32 values on a 32-bit
   5799         // machine.
   5800         RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
   5801         OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
   5802                                          RegVT, OpInfo.CallOperand);
   5803         OpInfo.ConstraintVT = RegVT;
   5804       }
   5805     }
   5806 
   5807     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
   5808   }
   5809 
   5810   MVT RegVT;
   5811   EVT ValueVT = OpInfo.ConstraintVT;
   5812 
   5813   // If this is a constraint for a specific physical register, like {r17},
   5814   // assign it now.
   5815   if (unsigned AssignedReg = PhysReg.first) {
   5816     const TargetRegisterClass *RC = PhysReg.second;
   5817     if (OpInfo.ConstraintVT == MVT::Other)
   5818       ValueVT = *RC->vt_begin();
   5819 
   5820     // Get the actual register value type.  This is important, because the user
   5821     // may have asked for (e.g.) the AX register in i32 type.  We need to
   5822     // remember that AX is actually i16 to get the right extension.
   5823     RegVT = *RC->vt_begin();
   5824 
   5825     // This is an explicit reference to a physical register.
   5826     Regs.push_back(AssignedReg);
   5827 
   5828     // If this is an expanded reference, add the rest of the regs to Regs.
   5829     if (NumRegs != 1) {
   5830       TargetRegisterClass::iterator I = RC->begin();
   5831       for (; *I != AssignedReg; ++I)
   5832         assert(I != RC->end() && "Didn't find reg!");
   5833 
   5834       // Already added the first reg.
   5835       --NumRegs; ++I;
   5836       for (; NumRegs; --NumRegs, ++I) {
   5837         assert(I != RC->end() && "Ran out of registers to allocate!");
   5838         Regs.push_back(*I);
   5839       }
   5840     }
   5841 
   5842     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
   5843     return;
   5844   }
   5845 
   5846   // Otherwise, if this was a reference to an LLVM register class, create vregs
   5847   // for this reference.
   5848   if (const TargetRegisterClass *RC = PhysReg.second) {
   5849     RegVT = *RC->vt_begin();
   5850     if (OpInfo.ConstraintVT == MVT::Other)
   5851       ValueVT = RegVT;
   5852 
   5853     // Create the appropriate number of virtual registers.
   5854     MachineRegisterInfo &RegInfo = MF.getRegInfo();
   5855     for (; NumRegs; --NumRegs)
   5856       Regs.push_back(RegInfo.createVirtualRegister(RC));
   5857 
   5858     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
   5859     return;
   5860   }
   5861 
   5862   // Otherwise, we couldn't allocate enough registers for this.
   5863 }
   5864 
   5865 /// visitInlineAsm - Handle a call to an InlineAsm object.
   5866 ///
   5867 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
   5868   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
   5869 
   5870   /// ConstraintOperands - Information about all of the constraints.
   5871   SDISelAsmOperandInfoVector ConstraintOperands;
   5872 
   5873   const TargetLowering *TLI = TM.getTargetLowering();
   5874   TargetLowering::AsmOperandInfoVector
   5875     TargetConstraints = TLI->ParseConstraints(CS);
   5876 
   5877   bool hasMemory = false;
   5878 
   5879   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
   5880   unsigned ResNo = 0;   // ResNo - The result number of the next output.
   5881   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
   5882     ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
   5883     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
   5884 
   5885     MVT OpVT = MVT::Other;
   5886 
   5887     // Compute the value type for each operand.
   5888     switch (OpInfo.Type) {
   5889     case InlineAsm::isOutput:
   5890       // Indirect outputs just consume an argument.
   5891       if (OpInfo.isIndirect) {
   5892         OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
   5893         break;
   5894       }
   5895 
   5896       // The return value of the call is this value.  As such, there is no
   5897       // corresponding argument.
   5898       assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
   5899       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
   5900         OpVT = TLI->getSimpleValueType(STy->getElementType(ResNo));
   5901       } else {
   5902         assert(ResNo == 0 && "Asm only has one result!");
   5903         OpVT = TLI->getSimpleValueType(CS.getType());
   5904       }
   5905       ++ResNo;
   5906       break;
   5907     case InlineAsm::isInput:
   5908       OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
   5909       break;
   5910     case InlineAsm::isClobber:
   5911       // Nothing to do.
   5912       break;
   5913     }
   5914 
   5915     // If this is an input or an indirect output, process the call argument.
   5916     // BasicBlocks are labels, currently appearing only in asm's.
   5917     if (OpInfo.CallOperandVal) {
   5918       if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
   5919         OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
   5920       } else {
   5921         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
   5922       }
   5923 
   5924       OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), *TLI, TD).
   5925         getSimpleVT();
   5926     }
   5927 
   5928     OpInfo.ConstraintVT = OpVT;
   5929 
   5930     // Indirect operands access memory.
   5931     if (OpInfo.isIndirect)
   5932       hasMemory = true;
   5933     else {
   5934       for (unsigned j = 0, ee = OpInfo.Codes.size(); j != ee; ++j) {
   5935         TargetLowering::ConstraintType
   5936           CType = TLI->getConstraintType(OpInfo.Codes[j]);
   5937         if (CType == TargetLowering::C_Memory) {
   5938           hasMemory = true;
   5939           break;
   5940         }
   5941       }
   5942     }
   5943   }
   5944 
   5945   SDValue Chain, Flag;
   5946 
   5947   // We won't need to flush pending loads if this asm doesn't touch
   5948   // memory and is nonvolatile.
   5949   if (hasMemory || IA->hasSideEffects())
   5950     Chain = getRoot();
   5951   else
   5952     Chain = DAG.getRoot();
   5953 
   5954   // Second pass over the constraints: compute which constraint option to use
   5955   // and assign registers to constraints that want a specific physreg.
   5956   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
   5957     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
   5958 
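            // For example (illustrative), in a constraint string such as "=r,0" the
            // input tied to output 0 must end up with a compatible type; the check
            // below rejects integer/FP mismatches and differing register classes.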
   5959     // If this is an output operand with a matching input operand, look up the
   5960     // matching input. If their types mismatch, e.g. one is an integer, the
   5961     // other is floating point, or their sizes are different, flag it as an
   5962     // error.
   5963     if (OpInfo.hasMatchingInput()) {
   5964       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
   5965 
   5966       if (OpInfo.ConstraintVT != Input.ConstraintVT) {
   5967         std::pair<unsigned, const TargetRegisterClass*> MatchRC =
   5968           TLI->getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
   5969                                             OpInfo.ConstraintVT);
   5970         std::pair<unsigned, const TargetRegisterClass*> InputRC =
   5971           TLI->getRegForInlineAsmConstraint(Input.ConstraintCode,
   5972                                             Input.ConstraintVT);
   5973         if ((OpInfo.ConstraintVT.isInteger() !=
   5974              Input.ConstraintVT.isInteger()) ||
   5975             (MatchRC.second != InputRC.second)) {
   5976           report_fatal_error("Unsupported asm: input constraint"
   5977                              " with a matching output constraint of"
   5978                              " incompatible type!");
   5979         }
   5980         Input.ConstraintVT = OpInfo.ConstraintVT;
   5981       }
   5982     }
   5983 
   5984     // Compute the constraint code and ConstraintType to use.
   5985     TLI->ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
   5986 
   5987     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
   5988         OpInfo.Type == InlineAsm::isClobber)
   5989       continue;
   5990 
   5991     // If this is a memory input, and if the operand is not indirect, do what we
    5992     // need to provide an address for the memory input.
   5993     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
   5994         !OpInfo.isIndirect) {
   5995       assert((OpInfo.isMultipleAlternative ||
   5996               (OpInfo.Type == InlineAsm::isInput)) &&
   5997              "Can only indirectify direct input operands!");
   5998 
   5999       // Memory operands really want the address of the value.  If we don't have
    6000       // an indirect input, put it in the constant pool if we can, otherwise
    6001       // spill it to a stack slot.
   6002       // TODO: This isn't quite right. We need to handle these according to
   6003       // the addressing mode that the constraint wants. Also, this may take
   6004       // an additional register for the computation and we don't want that
   6005       // either.
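               // Illustrative sketch: if the chosen alternative for a constraint such
               // as "rm" ends up being memory and the operand is a plain value like
               // the constant 42, it is placed in the constant pool (or a stack slot,
               // below) so that an address can be handed to the asm.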
   6006 
   6007       // If the operand is a float, integer, or vector constant, spill to a
   6008       // constant pool entry to get its address.
   6009       const Value *OpVal = OpInfo.CallOperandVal;
   6010       if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
   6011           isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
   6012         OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
   6013                                                  TLI->getPointerTy());
   6014       } else {
   6015         // Otherwise, create a stack slot and emit a store to it before the
   6016         // asm.
   6017         Type *Ty = OpVal->getType();
   6018         uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
   6019         unsigned Align  = TLI->getDataLayout()->getPrefTypeAlignment(Ty);
   6020         MachineFunction &MF = DAG.getMachineFunction();
   6021         int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
   6022         SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI->getPointerTy());
   6023         Chain = DAG.getStore(Chain, getCurSDLoc(),
   6024                              OpInfo.CallOperand, StackSlot,
   6025                              MachinePointerInfo::getFixedStack(SSFI),
   6026                              false, false, 0);
   6027         OpInfo.CallOperand = StackSlot;
   6028       }
   6029 
   6030       // There is no longer a Value* corresponding to this operand.
   6031       OpInfo.CallOperandVal = 0;
   6032 
   6033       // It is now an indirect operand.
   6034       OpInfo.isIndirect = true;
   6035     }
   6036 
   6037     // If this constraint is for a specific register, allocate it before
   6038     // anything else.
   6039     if (OpInfo.ConstraintType == TargetLowering::C_Register)
   6040       GetRegistersForValue(DAG, *TLI, getCurSDLoc(), OpInfo);
   6041   }
   6042 
    6043   // Third pass - Loop over all of the operands, assigning virtual or physregs
   6044   // to register class operands.
   6045   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
   6046     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
   6047 
   6048     // C_Register operands have already been allocated, Other/Memory don't need
   6049     // to be.
   6050     if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
   6051       GetRegistersForValue(DAG, *TLI, getCurSDLoc(), OpInfo);
   6052   }
   6053 
   6054   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
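           // The operand layout (see the InlineAsm::Op_* constants in
           // llvm/IR/InlineAsm.h) is roughly: 0 = input chain, 1 = asm string,
           // 2 = !srcloc MDNode, 3 = ExtraInfo flags, and from operand 4 onward,
           // groups of one flag word followed by that group's operands.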
   6055   std::vector<SDValue> AsmNodeOperands;
   6056   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
   6057   AsmNodeOperands.push_back(
   6058           DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
   6059                                       TLI->getPointerTy()));
   6060 
   6061   // If we have a !srcloc metadata node associated with it, we want to attach
   6062   // this to the ultimately generated inline asm machineinstr.  To do this, we
   6063   // pass in the third operand as this (potentially null) inline asm MDNode.
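           // Illustrative IR (not from this file), roughly:
           //   call void asm sideeffect "nop", ""(), !srcloc !42
           // where !42 lets later diagnostics point back at the original source line.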
   6064   const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
   6065   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
   6066 
   6067   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
   6068   // bits as operand 3.
   6069   unsigned ExtraInfo = 0;
   6070   if (IA->hasSideEffects())
   6071     ExtraInfo |= InlineAsm::Extra_HasSideEffects;
   6072   if (IA->isAlignStack())
   6073     ExtraInfo |= InlineAsm::Extra_IsAlignStack;
   6074   // Set the asm dialect.
   6075   ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
   6076 
   6077   // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
   6078   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
   6079     TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
   6080 
   6081     // Compute the constraint code and ConstraintType to use.
   6082     TLI->ComputeConstraintToUse(OpInfo, SDValue());
   6083 
   6084     // Ideally, we would only check against memory constraints.  However, the
    6085     // meaning of an 'other' constraint can be target-specific and we can't easily
    6086     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
    6087     // for 'other' constraints as well.
   6088     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
   6089         OpInfo.ConstraintType == TargetLowering::C_Other) {
   6090       if (OpInfo.Type == InlineAsm::isInput)
   6091         ExtraInfo |= InlineAsm::Extra_MayLoad;
   6092       else if (OpInfo.Type == InlineAsm::isOutput)
   6093         ExtraInfo |= InlineAsm::Extra_MayStore;
   6094       else if (OpInfo.Type == InlineAsm::isClobber)
   6095         ExtraInfo |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
   6096     }
   6097   }
   6098 
   6099   AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo,
   6100                                                   TLI->getPointerTy()));
   6101 
   6102   // Loop over all of the inputs, copying the operand values into the
   6103   // appropriate registers and processing the output regs.
   6104   RegsForValue RetValRegs;
   6105 
   6106   // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
   6107   std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
   6108 
   6109   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
   6110     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
   6111 
   6112     switch (OpInfo.Type) {
   6113     case InlineAsm::isOutput: {
   6114       if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
   6115           OpInfo.ConstraintType != TargetLowering::C_Register) {
   6116         // Memory output, or 'other' output (e.g. 'X' constraint).
   6117         assert(OpInfo.isIndirect && "Memory output must be indirect operand");
   6118 
   6119         // Add information to the INLINEASM node to know about this output.
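                 // The flag word packs the operand kind into its low bits and the
                 // number of SDValue operands that follow above that (see
                 // InlineAsm::getFlagWord); here it is Kind_Mem with one operand,
                 // the address.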
   6120         unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
   6121         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags,
   6122                                                         TLI->getPointerTy()));
   6123         AsmNodeOperands.push_back(OpInfo.CallOperand);
   6124         break;
   6125       }
   6126 
   6127       // Otherwise, this is a register or register class output.
   6128 
   6129       // Copy the output from the appropriate register.  Find a register that
   6130       // we can use.
   6131       if (OpInfo.AssignedRegs.Regs.empty()) {
   6132         LLVMContext &Ctx = *DAG.getContext();
   6133         Ctx.emitError(CS.getInstruction(),
   6134                       "couldn't allocate output register for constraint '" +
   6135                           Twine(OpInfo.ConstraintCode) + "'");
   6136         return;
   6137       }
   6138 
   6139       // If this is an indirect operand, store through the pointer after the
   6140       // asm.
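               // Illustrative example (not from this file): for
               //   call void asm "mov $$1, $0", "=*r"(i32* %p)
               // the asm produces the value in a register and the store through %p is
               // emitted after the INLINEASM node via IndirectStoresToEmit.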
   6141       if (OpInfo.isIndirect) {
   6142         IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
   6143                                                       OpInfo.CallOperandVal));
   6144       } else {
   6145         // This is the result value of the call.
   6146         assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
   6147         // Concatenate this output onto the outputs list.
   6148         RetValRegs.append(OpInfo.AssignedRegs);
   6149       }
   6150 
   6151       // Add information to the INLINEASM node to know that this register is
   6152       // set.
   6153       OpInfo.AssignedRegs
   6154           .AddInlineAsmOperands(OpInfo.isEarlyClobber
   6155                                     ? InlineAsm::Kind_RegDefEarlyClobber
   6156                                     : InlineAsm::Kind_RegDef,
   6157                                 false, 0, DAG, AsmNodeOperands);
   6158       break;
   6159     }
   6160     case InlineAsm::isInput: {
   6161       SDValue InOperandVal = OpInfo.CallOperand;
   6162 
   6163       if (OpInfo.isMatchingInputConstraint()) {   // Matching constraint?
   6164         // If this is required to match an output register we have already set,
   6165         // just use its register.
   6166         unsigned OperandNo = OpInfo.getMatchedOperand();
   6167 
    6168         // Scan until we find the definition of this operand that we already emitted.
   6169         // When we find it, create a RegsForValue operand.
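                 // Each already-emitted group is one flag word followed by
                 // getNumOperandRegisters(OpFlag) operands, so stepping by that count
                 // plus one lands on the next group's flag word.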
   6170         unsigned CurOp = InlineAsm::Op_FirstOperand;
   6171         for (; OperandNo; --OperandNo) {
   6172           // Advance to the next operand.
   6173           unsigned OpFlag =
   6174             cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
   6175           assert((InlineAsm::isRegDefKind(OpFlag) ||
   6176                   InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
   6177                   InlineAsm::isMemKind(OpFlag)) && "Skipped past definitions?");
   6178           CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
   6179         }
   6180 
   6181         unsigned OpFlag =
   6182           cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
   6183         if (InlineAsm::isRegDefKind(OpFlag) ||
   6184             InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
   6185           // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
   6186           if (OpInfo.isIndirect) {
   6187             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
   6188             LLVMContext &Ctx = *DAG.getContext();
   6189             Ctx.emitError(CS.getInstruction(), "inline asm not supported yet:"
   6190                                                " don't know how to handle tied "
   6191                                                "indirect register inputs");
   6192             return;
   6193           }
   6194 
   6195           RegsForValue MatchedRegs;
   6196           MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
   6197           MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
   6198           MatchedRegs.RegVTs.push_back(RegVT);
   6199           MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
   6200           for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
   6201                i != e; ++i) {
   6202             if (const TargetRegisterClass *RC = TLI->getRegClassFor(RegVT))
   6203               MatchedRegs.Regs.push_back(RegInfo.createVirtualRegister(RC));
   6204             else {
   6205               LLVMContext &Ctx = *DAG.getContext();
   6206               Ctx.emitError(CS.getInstruction(),
   6207                             "inline asm error: This value"
   6208                             " type register class is not natively supported!");
   6209               return;
   6210             }
   6211           }
    6212           // Use the produced MatchedRegs object to copy the input value into
                   // the matched registers.
   6213           MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurSDLoc(),
   6214                                     Chain, &Flag, CS.getInstruction());
   6215           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
   6216                                            true, OpInfo.getMatchedOperand(),
   6217                                            DAG, AsmNodeOperands);
   6218           break;
   6219         }
   6220 
   6221         assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
   6222         assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
   6223                "Unexpected number of operands");
   6224         // Add information to the INLINEASM node to know about this input.
   6225         // See InlineAsm.h isUseOperandTiedToDef.
   6226         OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
   6227                                                     OpInfo.getMatchedOperand());
   6228         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
   6229                                                         TLI->getPointerTy()));
   6230         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
   6231         break;
   6232       }
   6233 
   6234       // Treat indirect 'X' constraint as memory.
   6235       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
   6236           OpInfo.isIndirect)
   6237         OpInfo.ConstraintType = TargetLowering::C_Memory;
   6238 
   6239       if (OpInfo.ConstraintType == TargetLowering::C_Other) {
   6240         std::vector<SDValue> Ops;
   6241         TLI->LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
   6242                                           Ops, DAG);
   6243         if (Ops.empty()) {
   6244           LLVMContext &Ctx = *DAG.getContext();
   6245           Ctx.emitError(CS.getInstruction(),
   6246                         "invalid operand for inline asm constraint '" +
   6247                             Twine(OpInfo.ConstraintCode) + "'");
   6248           return;
   6249         }
   6250 
   6251         // Add information to the INLINEASM node to know about this input.
   6252         unsigned ResOpType =
   6253           InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
   6254         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
   6255                                                         TLI->getPointerTy()));
   6256         AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
   6257         break;
   6258       }
   6259 
   6260       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
   6261         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
   6262         assert(InOperandVal.getValueType() == TLI->getPointerTy() &&
   6263                "Memory operands expect pointer values");
   6264 
   6265         // Add information to the INLINEASM node to know about this input.
   6266         unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
   6267         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
   6268                                                         TLI->getPointerTy()));
   6269         AsmNodeOperands.push_back(InOperandVal);
   6270         break;
   6271       }
   6272 
   6273       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
   6274               OpInfo.ConstraintType == TargetLowering::C_Register) &&
   6275              "Unknown constraint type!");
   6276 
   6277       // TODO: Support this.
   6278       if (OpInfo.isIndirect) {
   6279         LLVMContext &Ctx = *DAG.getContext();
   6280         Ctx.emitError(CS.getInstruction(),
   6281                       "Don't know how to handle indirect register inputs yet "
   6282                       "for constraint '" +
   6283                           Twine(OpInfo.ConstraintCode) + "'");
   6284         return;
   6285       }
   6286 
   6287       // Copy the input into the appropriate registers.
   6288       if (OpInfo.AssignedRegs.Regs.empty()) {
   6289         LLVMContext &Ctx = *DAG.getContext();
   6290         Ctx.emitError(CS.getInstruction(),
   6291                       "couldn't allocate input reg for constraint '" +
   6292                           Twine(OpInfo.ConstraintCode) + "'");
   6293         return;
   6294       }
   6295 
   6296       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurSDLoc(),
   6297                                         Chain, &Flag, CS.getInstruction());
   6298 
   6299       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
   6300                                                DAG, AsmNodeOperands);
   6301       break;
   6302     }
   6303     case InlineAsm::isClobber: {
   6304       // Add the clobbered value to the operand list, so that the register
   6305       // allocator is aware that the physreg got clobbered.
   6306       if (!OpInfo.AssignedRegs.Regs.empty())
   6307         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
   6308                                                  false, 0, DAG,
   6309                                                  AsmNodeOperands);
   6310       break;
   6311     }
   6312     }
   6313   }
   6314 
   6315   // Finish up input operands.  Set the input chain and add the flag last.
   6316   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
   6317   if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
   6318 
   6319   Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
   6320                       DAG.getVTList(MVT::Other, MVT::Glue),
   6321                       &AsmNodeOperands[0], AsmNodeOperands.size());
   6322   Flag = Chain.getValue(1);
   6323 
   6324   // If this asm returns a register value, copy the result from that register
   6325   // and set it as the value of the call.
   6326   if (!RetValRegs.Regs.empty()) {
   6327     SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
   6328                                              Chain, &Flag, CS.getInstruction());
   6329 
   6330     // FIXME: Why don't we do this for inline asms with MRVs?
   6331     if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
   6332       EVT ResultType = TLI->getValueType(CS.getType());
   6333 
   6334       // If any of the results of the inline asm is a vector, it may have the
   6335       // wrong width/num elts.  This can happen for register classes that can
   6336       // contain multiple different value types.  The preg or vreg allocated may
   6337       // not have the same VT as was expected.  Convert it to the right type
   6338       // with bit_convert.
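               // Illustrative example: a register class such as x86's VR128 may be
               // allocated as v2i64 even though the IR result type is <4 x float>;
               // the BITCAST below reconciles the two without changing any bits.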
   6339       if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
   6340         Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(),
   6341                           ResultType, Val);
   6342 
   6343       } else if (ResultType != Val.getValueType() &&
   6344                  ResultType.isInteger() && Val.getValueType().isInteger()) {
   6345         // If a result value was tied to an input value, the computed result may
   6346         // have a wider width than the expected result.  Extract the relevant
   6347         // portion.
   6348         Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultType, Val);
   6349       }
   6350 
   6351       assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
   6352     }
   6353 
   6354     setValue(CS.getInstruction(), Val);
   6355     // Don't need to use this as a chain in this case.
   6356     if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
   6357       return;
   6358   }
   6359 
   6360   std::vector<std::pair<SDValue, const Value *> > StoresToEmit;
   6361 
   6362   // Process indirect outputs, first output all of the flagged copies out of
   6363   // physregs.
   6364   for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
   6365     RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
   6366     const Value *Ptr = IndirectStoresToEmit[i].second;
   6367     SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
   6368                                              Chain, &Flag, IA);
   6369     StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
   6370   }
   6371 
   6372   // Emit the non-flagged stores from the physregs.
   6373   SmallVector<SDValue, 8> OutChains;
   6374   for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
   6375     SDValue Val = DAG.getStore(Chain, getCurSDLoc(),
   6376                                StoresToEmit[i].first,
   6377                                getValue(StoresToEmit[i].second),
   6378                                MachinePointerInfo(StoresToEmit[i].second),
   6379                                false, false, 0);
   6380     OutChains.push_back(Val);
   6381   }
   6382 
   6383   if (!OutChains.empty())
   6384     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
   6385                         &OutChains[0], OutChains.size());
   6386 
   6387   DAG.setRoot(Chain);
   6388 }
   6389 
   6390 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
   6391   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
   6392                           MVT::Other, getRoot(),
   6393                           getValue(I.getArgOperand(0)),
   6394                           DAG.getSrcValue(I.getArgOperand(0))));
   6395 }
   6396 
   6397 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
   6398   const TargetLowering *TLI = TM.getTargetLowering();
   6399   const DataLayout &TD = *TLI->getDataLayout();
   6400   SDValue V = DAG.getVAArg(TLI->getValueType(I.getType()), getCurSDLoc(),
   6401                            getRoot(), getValue(I.getOperand(0)),
   6402                            DAG.getSrcValue(I.getOperand(0)),
   6403                            TD.getABITypeAlignment(I.getType()));
   6404   setValue(&I, V);
   6405   DAG.setRoot(V.getValue(1));
   6406 }
   6407 
   6408 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
   6409   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
   6410                           MVT::Other, getRoot(),
   6411                           getValue(I.getArgOperand(0)),
   6412                           DAG.getSrcValue(I.getArgOperand(0))));
   6413 }
   6414 
   6415 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
   6416   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
   6417                           MVT::Other, getRoot(),
   6418                           getValue(I.getArgOperand(0)),
   6419                           getValue(I.getArgOperand(1)),
   6420                           DAG.getSrcValue(I.getArgOperand(0)),
   6421                           DAG.getSrcValue(I.getArgOperand(1))));
   6422 }
   6423 
   6424 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
   6425 /// implementation, which just calls LowerCall.
   6426 /// FIXME: When all targets are
   6427 /// migrated to using LowerCall, this hook should be integrated into SDISel.
   6428 std::pair<SDValue, SDValue>
   6429 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
   6430   // Handle the incoming return values from the call.
   6431   CLI.Ins.clear();
   6432   SmallVector<EVT, 4> RetTys;
   6433   ComputeValueVTs(*this, CLI.RetTy, RetTys);
   6434   for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
   6435     EVT VT = RetTys[I];
   6436     MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
   6437     unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
   6438     for (unsigned i = 0; i != NumRegs; ++i) {
   6439       ISD::InputArg MyFlags;
   6440       MyFlags.VT = RegisterVT;
   6441       MyFlags.Used = CLI.IsReturnValueUsed;
   6442       if (CLI.RetSExt)
   6443         MyFlags.Flags.setSExt();
   6444       if (CLI.RetZExt)
   6445         MyFlags.Flags.setZExt();
   6446       if (CLI.IsInReg)
   6447         MyFlags.Flags.setInReg();
   6448       CLI.Ins.push_back(MyFlags);
   6449     }
   6450   }
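           // For instance, on a typical 32-bit target an i64 return type has
           // RegisterVT i32 and NumRegs 2, so two ISD::InputArg entries are pushed
           // for that one return value (illustrative; the split is target-dependent).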
   6451 
   6452   // Handle all of the outgoing arguments.
   6453   CLI.Outs.clear();
   6454   CLI.OutVals.clear();
   6455   ArgListTy &Args = CLI.Args;
   6456   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
   6457     SmallVector<EVT, 4> ValueVTs;
   6458     ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
   6459     for (unsigned Value = 0, NumValues = ValueVTs.size();
   6460          Value != NumValues; ++Value) {
   6461       EVT VT = ValueVTs[Value];
   6462       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
   6463       SDValue Op = SDValue(Args[i].Node.getNode(),
   6464                            Args[i].Node.getResNo() + Value);
   6465       ISD::ArgFlagsTy Flags;
   6466       unsigned OriginalAlignment =
   6467         getDataLayout()->getABITypeAlignment(ArgTy);
   6468 
   6469       if (Args[i].isZExt)
   6470         Flags.setZExt();
   6471       if (Args[i].isSExt)
   6472         Flags.setSExt();
   6473       if (Args[i].isInReg)
   6474         Flags.setInReg();
   6475       if (Args[i].isSRet)
   6476         Flags.setSRet();
   6477       if (Args[i].isByVal) {
   6478         Flags.setByVal();
   6479         PointerType *Ty = cast<PointerType>(Args[i].Ty);
   6480         Type *ElementTy = Ty->getElementType();
   6481         Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
    6482         // For ByVal, alignment should come from the front end.  The back end will
    6483         // guess if this info is not there, but there are cases it cannot get right.
   6484         unsigned FrameAlign;
   6485         if (Args[i].Alignment)
   6486           FrameAlign = Args[i].Alignment;
   6487         else
   6488           FrameAlign = getByValTypeAlignment(ElementTy);
   6489         Flags.setByValAlign(FrameAlign);
   6490       }
   6491       if (Args[i].isNest)
   6492         Flags.setNest();
   6493       Flags.setOrigAlign(OriginalAlignment);
   6494 
   6495       MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
   6496       unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
   6497       SmallVector<SDValue, 4> Parts(NumParts);
   6498       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
   6499 
   6500       if (Args[i].isSExt)
   6501         ExtendKind = ISD::SIGN_EXTEND;
   6502       else if (Args[i].isZExt)
   6503         ExtendKind = ISD::ZERO_EXTEND;
   6504 
   6505       // Conservatively only handle 'returned' on non-vectors for now
   6506       if (Args[i].isReturned && !Op.getValueType().isVector()) {
   6507         assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues &&
   6508                "unexpected use of 'returned'");
   6509         // Before passing 'returned' to the target lowering code, ensure that
   6510         // either the register MVT and the actual EVT are the same size or that
   6511         // the return value and argument are extended in the same way; in these
   6512         // cases it's safe to pass the argument register value unchanged as the
   6513         // return register value (although it's at the target's option whether
   6514         // to do so)
   6515         // TODO: allow code generation to take advantage of partially preserved
   6516         // registers rather than clobbering the entire register when the
   6517         // parameter extension method is not compatible with the return
   6518         // extension method
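                 // For instance, a memset-style routine that returns its first
                 // pointer argument can mark that argument 'returned'; the caller may
                 // then keep using the register that already held the argument.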
   6519         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
   6520             (ExtendKind != ISD::ANY_EXTEND &&
   6521              CLI.RetSExt == Args[i].isSExt && CLI.RetZExt == Args[i].isZExt))
    6522           Flags.setReturned();
   6523       }
   6524 
   6525       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts,
   6526                      PartVT, CLI.CS ? CLI.CS->getInstruction() : 0, ExtendKind);
   6527 
   6528       for (unsigned j = 0; j != NumParts; ++j) {
    6529         // If it isn't the first piece, the alignment must be 1.
   6530         ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(),
   6531                                i < CLI.NumFixedArgs,
   6532                                i, j*Parts[j].getValueType().getStoreSize());
   6533         if (NumParts > 1 && j == 0)
   6534           MyFlags.Flags.setSplit();
   6535         else if (j != 0)
   6536           MyFlags.Flags.setOrigAlign(1);
   6537 
   6538         CLI.Outs.push_back(MyFlags);
   6539         CLI.OutVals.push_back(Parts[j]);
   6540       }
   6541     }
   6542   }
   6543 
   6544   SmallVector<SDValue, 4> InVals;
   6545   CLI.Chain = LowerCall(CLI, InVals);
   6546 
   6547   // Verify that the target's LowerCall behaved as expected.
   6548   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
   6549          "LowerCall didn't return a valid chain!");
   6550   assert((!CLI.IsTailCall || InVals.empty()) &&
   6551          "LowerCall emitted a return value for a tail call!");
   6552   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
   6553          "LowerCall didn't emit the correct number of values!");
   6554 
   6555   // For a tail call, the return value is merely live-out and there aren't
   6556   // any nodes in the DAG representing it. Return a special value to
   6557   // indicate that a tail call has been emitted and no more Instructions
   6558   // should be processed in the current block.
   6559   if (CLI.IsTailCall) {
   6560     CLI.DAG.setRoot(CLI.Chain);
   6561     return std::make_pair(SDValue(), SDValue());
   6562   }
   6563 
   6564   DEBUG(for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
   6565           assert(InVals[i].getNode() &&
   6566                  "LowerCall emitted a null value!");
   6567           assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
   6568                  "LowerCall emitted a value with the wrong type!");
   6569         });
   6570 
   6571   // Collect the legal value parts into potentially illegal values
   6572   // that correspond to the original function's return values.
   6573   ISD::NodeType AssertOp = ISD::DELETED_NODE;
   6574   if (CLI.RetSExt)
   6575     AssertOp = ISD::AssertSext;
   6576   else if (CLI.RetZExt)
   6577     AssertOp = ISD::AssertZext;
   6578   SmallVector<SDValue, 4> ReturnValues;
   6579   unsigned CurReg = 0;
   6580   for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
   6581     EVT VT = RetTys[I];
   6582     MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
   6583     unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
   6584 
   6585     ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
   6586                                             NumRegs, RegisterVT, VT, NULL,
   6587                                             AssertOp));
   6588     CurReg += NumRegs;
   6589   }
   6590 
   6591   // For a function returning void, there is no return value. We can't create
    6592   // such a node, so we just return a null return value in that case; nothing
    6593   // will actually look at the value anyway.
   6594   if (ReturnValues.empty())
   6595     return std::make_pair(SDValue(), CLI.Chain);
   6596 
   6597   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
   6598                                 CLI.DAG.getVTList(&RetTys[0], RetTys.size()),
   6599                             &ReturnValues[0], ReturnValues.size());
   6600   return std::make_pair(Res, CLI.Chain);
   6601 }
   6602 
   6603 void TargetLowering::LowerOperationWrapper(SDNode *N,
   6604                                            SmallVectorImpl<SDValue> &Results,
   6605                                            SelectionDAG &DAG) const {
   6606   SDValue Res = LowerOperation(SDValue(N, 0), DAG);
   6607   if (Res.getNode())
   6608     Results.push_back(Res);
   6609 }
   6610 
   6611 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   6612   llvm_unreachable("LowerOperation not implemented for this target!");
   6613 }
   6614 
   6615 void
   6616 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
   6617   SDValue Op = getNonRegisterValue(V);
   6618   assert((Op.getOpcode() != ISD::CopyFromReg ||
   6619           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
   6620          "Copy from a reg to the same reg!");
   6621   assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
   6622 
   6623   const TargetLowering *TLI = TM.getTargetLowering();
   6624   RegsForValue RFV(V->getContext(), *TLI, Reg, V->getType());
   6625   SDValue Chain = DAG.getEntryNode();
   6626   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, 0, V);
   6627   PendingExports.push_back(Chain);
   6628 }
   6629 
   6630 #include "llvm/CodeGen/SelectionDAGISel.h"
   6631 
   6632 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
    6633 /// entry block, return true.  Uses by switches do not count as entry-block
    6634 /// uses, since the switch may expand into multiple basic blocks.
   6635 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
   6636   // With FastISel active, we may be splitting blocks, so force creation
   6637   // of virtual registers for all non-dead arguments.
   6638   if (FastISel)
   6639     return A->use_empty();
   6640 
   6641   const BasicBlock *Entry = A->getParent()->begin();
   6642   for (Value::const_use_iterator UI = A->use_begin(), E = A->use_end();
   6643        UI != E; ++UI) {
   6644     const User *U = *UI;
   6645     if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
   6646       return false;  // Use not in entry block.
   6647   }
   6648   return true;
   6649 }
   6650 
   6651 void SelectionDAGISel::LowerArguments(const Function &F) {
   6652   SelectionDAG &DAG = SDB->DAG;
   6653   SDLoc dl = SDB->getCurSDLoc();
   6654   const TargetLowering *TLI = getTargetLowering();
   6655   const DataLayout *TD = TLI->getDataLayout();
   6656   SmallVector<ISD::InputArg, 16> Ins;
   6657 
   6658   if (!FuncInfo->CanLowerReturn) {
   6659     // Put in an sret pointer parameter before all the other parameters.
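             // Illustrative example: a function returning a struct the target cannot
             // return in registers is demoted to take a hidden pointer parameter; the
             // callee writes its result through that pointer instead.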
   6660     SmallVector<EVT, 1> ValueVTs;
   6661     ComputeValueVTs(*getTargetLowering(),
   6662                     PointerType::getUnqual(F.getReturnType()), ValueVTs);
   6663 
    6664     // NOTE: Assuming that a pointer will never break down into more than one VT
    6665     // or more than one register.
   6666     ISD::ArgFlagsTy Flags;
   6667     Flags.setSRet();
   6668     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
   6669     ISD::InputArg RetArg(Flags, RegisterVT, true, 0, 0);
   6670     Ins.push_back(RetArg);
   6671   }
   6672 
   6673   // Set up the incoming argument description vector.
   6674   unsigned Idx = 1;
   6675   for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
   6676        I != E; ++I, ++Idx) {
   6677     SmallVector<EVT, 4> ValueVTs;
   6678     ComputeValueVTs(*TLI, I->getType(), ValueVTs);
   6679     bool isArgValueUsed = !I->use_empty();
   6680     for (unsigned Value = 0, NumValues = ValueVTs.size();
   6681          Value != NumValues; ++Value) {
   6682       EVT VT = ValueVTs[Value];
   6683       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
   6684       ISD::ArgFlagsTy Flags;
   6685       unsigned OriginalAlignment =
   6686         TD->getABITypeAlignment(ArgTy);
   6687 
   6688       if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
   6689         Flags.setZExt();
   6690       if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
   6691         Flags.setSExt();
   6692       if (F.getAttributes().hasAttribute(Idx, Attribute::InReg))
   6693         Flags.setInReg();
   6694       if (F.getAttributes().hasAttribute(Idx, Attribute::StructRet))
   6695         Flags.setSRet();
   6696       if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal)) {
   6697         Flags.setByVal();
   6698         PointerType *Ty = cast<PointerType>(I->getType());
   6699         Type *ElementTy = Ty->getElementType();
   6700         Flags.setByValSize(TD->getTypeAllocSize(ElementTy));
    6701         // For ByVal, alignment should be passed from the front end.  The back end
    6702         // will guess if this info is not there, but there are cases it cannot get right.
   6703         unsigned FrameAlign;
   6704         if (F.getParamAlignment(Idx))
   6705           FrameAlign = F.getParamAlignment(Idx);
   6706         else
   6707           FrameAlign = TLI->getByValTypeAlignment(ElementTy);
   6708         Flags.setByValAlign(FrameAlign);
   6709       }
   6710       if (F.getAttributes().hasAttribute(Idx, Attribute::Nest))
   6711         Flags.setNest();
   6712       Flags.setOrigAlign(OriginalAlignment);
   6713 
   6714       MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
   6715       unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT);
   6716       for (unsigned i = 0; i != NumRegs; ++i) {
   6717         ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed,
   6718                               Idx-1, i*RegisterVT.getStoreSize());
   6719         if (NumRegs > 1 && i == 0)
   6720           MyFlags.Flags.setSplit();
    6721         // If it isn't the first piece, the alignment must be 1.
   6722         else if (i > 0)
   6723           MyFlags.Flags.setOrigAlign(1);
   6724         Ins.push_back(MyFlags);
   6725       }
   6726     }
   6727   }
   6728 
   6729   // Call the target to set up the argument values.
   6730   SmallVector<SDValue, 8> InVals;
   6731   SDValue NewRoot = TLI->LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
   6732                                               F.isVarArg(), Ins,
   6733                                               dl, DAG, InVals);
   6734 
   6735   // Verify that the target's LowerFormalArguments behaved as expected.
   6736   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
   6737          "LowerFormalArguments didn't return a valid chain!");
   6738   assert(InVals.size() == Ins.size() &&
   6739          "LowerFormalArguments didn't emit the correct number of values!");
   6740   DEBUG({
   6741       for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
   6742         assert(InVals[i].getNode() &&
   6743                "LowerFormalArguments emitted a null value!");
   6744         assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
   6745                "LowerFormalArguments emitted a value with the wrong type!");
   6746       }
   6747     });
   6748 
   6749   // Update the DAG with the new chain value resulting from argument lowering.
   6750   DAG.setRoot(NewRoot);
   6751 
   6752   // Set up the argument values.
   6753   unsigned i = 0;
   6754   Idx = 1;
   6755   if (!FuncInfo->CanLowerReturn) {
   6756     // Create a virtual register for the sret pointer, and put in a copy
   6757     // from the sret argument into it.
   6758     SmallVector<EVT, 1> ValueVTs;
   6759     ComputeValueVTs(*TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
   6760     MVT VT = ValueVTs[0].getSimpleVT();
   6761     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
   6762     ISD::NodeType AssertOp = ISD::DELETED_NODE;
   6763     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
   6764                                         RegVT, VT, NULL, AssertOp);
   6765 
   6766     MachineFunction& MF = SDB->DAG.getMachineFunction();
   6767     MachineRegisterInfo& RegInfo = MF.getRegInfo();
   6768     unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
   6769     FuncInfo->DemoteRegister = SRetReg;
   6770     NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(),
   6771                                     SRetReg, ArgValue);
   6772     DAG.setRoot(NewRoot);
   6773 
   6774     // i indexes lowered arguments.  Bump it past the hidden sret argument.
   6775     // Idx indexes LLVM arguments.  Don't touch it.
   6776     ++i;
   6777   }
   6778 
   6779   for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
   6780       ++I, ++Idx) {
   6781     SmallVector<SDValue, 4> ArgValues;
   6782     SmallVector<EVT, 4> ValueVTs;
   6783     ComputeValueVTs(*TLI, I->getType(), ValueVTs);
   6784     unsigned NumValues = ValueVTs.size();
   6785 
    6786     // If this argument is unused, remember its value anyway; the value is used
    6787     // to generate debugging information.
   6788     if (I->use_empty() && NumValues) {
   6789       SDB->setUnusedArgValue(I, InVals[i]);
   6790 
   6791       // Also remember any frame index for use in FastISel.
   6792       if (FrameIndexSDNode *FI =
   6793           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
   6794         FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
   6795     }
   6796 
   6797     for (unsigned Val = 0; Val != NumValues; ++Val) {
   6798       EVT VT = ValueVTs[Val];
   6799       MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
   6800       unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT);
   6801 
   6802       if (!I->use_empty()) {
   6803         ISD::NodeType AssertOp = ISD::DELETED_NODE;
   6804         if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
   6805           AssertOp = ISD::AssertSext;
   6806         else if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
   6807           AssertOp = ISD::AssertZext;
   6808 
   6809         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i],
   6810                                              NumParts, PartVT, VT,
   6811                                              NULL, AssertOp));
   6812       }
   6813 
   6814       i += NumParts;
   6815     }
   6816 
   6817     // We don't need to do anything else for unused arguments.
   6818     if (ArgValues.empty())
   6819       continue;
   6820 
   6821     // Note down frame index.
   6822     if (FrameIndexSDNode *FI =
   6823         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
   6824       FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
   6825 
   6826     SDValue Res = DAG.getMergeValues(&ArgValues[0], NumValues,
   6827                                      SDB->getCurSDLoc());
   6828 
   6829     SDB->setValue(I, Res);
   6830     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
   6831       if (LoadSDNode *LNode =
   6832           dyn_cast<LoadSDNode>(Res.getOperand(0).getNode()))
   6833         if (FrameIndexSDNode *FI =
   6834             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
    6835           FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
   6836     }
   6837 
   6838     // If this argument is live outside of the entry block, insert a copy from
   6839     // wherever we got it to the vreg that other BB's will reference it as.
   6840     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
   6841       // If we can, though, try to skip creating an unnecessary vreg.
   6842       // FIXME: This isn't very clean... it would be nice to make this more
   6843       // general.  It's also subtly incompatible with the hacks FastISel
   6844       // uses with vregs.
   6845       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
   6846       if (TargetRegisterInfo::isVirtualRegister(Reg)) {
   6847         FuncInfo->ValueMap[I] = Reg;
   6848         continue;
   6849       }
   6850     }
   6851     if (!isOnlyUsedInEntryBlock(I, TM.Options.EnableFastISel)) {
   6852       FuncInfo->InitializeRegForValue(I);
   6853       SDB->CopyToExportRegsIfNeeded(I);
   6854     }
   6855   }
   6856 
   6857   assert(i == InVals.size() && "Argument register count mismatch!");
   6858 
   6859   // Finally, if the target has anything special to do, allow it to do so.
   6860   // FIXME: this should insert code into the DAG!
   6861   EmitFunctionEntryCode();
   6862 }
   6863 
   6864 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
   6865 /// ensure constants are generated when needed.  Remember the virtual registers
   6866 /// that need to be added to the Machine PHI nodes as input.  We cannot just
   6867 /// directly add them, because expansion might result in multiple MBB's for one
   6868 /// BB.  As such, the start of the BB might correspond to a different MBB than
   6869 /// the end.
   6870 ///
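         /// Illustrative example: if a successor block begins with
         ///   %p = phi i32 [ 7, %thisbb ], [ %x, %otherbb ]
         /// a virtual register is created here for the constant 7 and the pair
         /// (machine PHI, vreg) is recorded in PHINodesToUpdate for later fix-up.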
   6871 void
   6872 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
   6873   const TerminatorInst *TI = LLVMBB->getTerminator();
   6874 
   6875   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
   6876 
   6877   // Check successor nodes' PHI nodes that expect a constant to be available
   6878   // from this block.
   6879   for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
   6880     const BasicBlock *SuccBB = TI->getSuccessor(succ);
   6881     if (!isa<PHINode>(SuccBB->begin())) continue;
   6882     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
   6883 
   6884     // If this terminator has multiple identical successors (common for
   6885     // switches), only handle each succ once.
   6886     if (!SuccsHandled.insert(SuccMBB)) continue;
   6887 
   6888     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
   6889 
   6890     // At this point we know that there is a 1-1 correspondence between LLVM PHI
   6891     // nodes and Machine PHI nodes, but the incoming operands have not been
   6892     // emitted yet.
   6893     for (BasicBlock::const_iterator I = SuccBB->begin();
   6894          const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
    6895       // Ignore dead PHIs.
   6896       if (PN->use_empty()) continue;
   6897 
   6898       // Skip empty types
   6899       if (PN->getType()->isEmptyTy())
   6900         continue;
   6901 
   6902       unsigned Reg;
   6903       const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
   6904 
   6905       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
   6906         unsigned &RegOut = ConstantsOut[C];
   6907         if (RegOut == 0) {
   6908           RegOut = FuncInfo.CreateRegs(C->getType());
   6909           CopyValueToVirtualRegister(C, RegOut);
   6910         }
   6911         Reg = RegOut;
   6912       } else {
   6913         DenseMap<const Value *, unsigned>::iterator I =
   6914           FuncInfo.ValueMap.find(PHIOp);
   6915         if (I != FuncInfo.ValueMap.end())
   6916           Reg = I->second;
   6917         else {
   6918           assert(isa<AllocaInst>(PHIOp) &&
   6919                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
   6920                  "Didn't codegen value into a register!??");
   6921           Reg = FuncInfo.CreateRegs(PHIOp->getType());
   6922           CopyValueToVirtualRegister(PHIOp, Reg);
   6923         }
   6924       }
   6925 
    6926       // Remember that this register needs to be added to the machine PHI node as
   6927       // the input for this MBB.
   6928       SmallVector<EVT, 4> ValueVTs;
   6929       const TargetLowering *TLI = TM.getTargetLowering();
   6930       ComputeValueVTs(*TLI, PN->getType(), ValueVTs);
   6931       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
   6932         EVT VT = ValueVTs[vti];
   6933         unsigned NumRegisters = TLI->getNumRegisters(*DAG.getContext(), VT);
   6934         for (unsigned i = 0, e = NumRegisters; i != e; ++i)
   6935           FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
   6936         Reg += NumRegisters;
   6937       }
   6938     }
   6939   }
   6940 
   6941   ConstantsOut.clear();
   6942 }
   6943