Home | History | Annotate | Download | only in SelectionDAG
      1 //===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This implements routines for translating from LLVM IR into SelectionDAG IR.
     11 //
     12 //===----------------------------------------------------------------------===//
     13 
     14 #define DEBUG_TYPE "isel"
     15 #include "SelectionDAGBuilder.h"
     16 #include "SDNodeDbgValue.h"
     17 #include "llvm/ADT/BitVector.h"
     18 #include "llvm/ADT/SmallSet.h"
     19 #include "llvm/Analysis/AliasAnalysis.h"
     20 #include "llvm/Analysis/BranchProbabilityInfo.h"
     21 #include "llvm/Analysis/ConstantFolding.h"
     22 #include "llvm/Analysis/ValueTracking.h"
     23 #include "llvm/CodeGen/Analysis.h"
     24 #include "llvm/CodeGen/FastISel.h"
     25 #include "llvm/CodeGen/FunctionLoweringInfo.h"
     26 #include "llvm/CodeGen/GCMetadata.h"
     27 #include "llvm/CodeGen/GCStrategy.h"
     28 #include "llvm/CodeGen/MachineFrameInfo.h"
     29 #include "llvm/CodeGen/MachineFunction.h"
     30 #include "llvm/CodeGen/MachineInstrBuilder.h"
     31 #include "llvm/CodeGen/MachineJumpTableInfo.h"
     32 #include "llvm/CodeGen/MachineModuleInfo.h"
     33 #include "llvm/CodeGen/MachineRegisterInfo.h"
     34 #include "llvm/CodeGen/SelectionDAG.h"
     35 #include "llvm/DebugInfo.h"
     36 #include "llvm/IR/CallingConv.h"
     37 #include "llvm/IR/Constants.h"
     38 #include "llvm/IR/DataLayout.h"
     39 #include "llvm/IR/DerivedTypes.h"
     40 #include "llvm/IR/Function.h"
     41 #include "llvm/IR/GlobalVariable.h"
     42 #include "llvm/IR/InlineAsm.h"
     43 #include "llvm/IR/Instructions.h"
     44 #include "llvm/IR/IntrinsicInst.h"
     45 #include "llvm/IR/Intrinsics.h"
     46 #include "llvm/IR/LLVMContext.h"
     47 #include "llvm/IR/Module.h"
     48 #include "llvm/Support/CommandLine.h"
     49 #include "llvm/Support/Debug.h"
     50 #include "llvm/Support/ErrorHandling.h"
     51 #include "llvm/Support/IntegersSubsetMapping.h"
     52 #include "llvm/Support/MathExtras.h"
     53 #include "llvm/Support/raw_ostream.h"
     54 #include "llvm/Target/TargetFrameLowering.h"
     55 #include "llvm/Target/TargetInstrInfo.h"
     56 #include "llvm/Target/TargetIntrinsicInfo.h"
     57 #include "llvm/Target/TargetLibraryInfo.h"
     58 #include "llvm/Target/TargetLowering.h"
     59 #include "llvm/Target/TargetOptions.h"
     60 #include <algorithm>
     61 using namespace llvm;
     62 
     63 /// LimitFloatPrecision - Generate low-precision inline sequences for
     64 /// some float libcalls (6, 8 or 12 bits).
     65 static unsigned LimitFloatPrecision;
     66 
     67 static cl::opt<unsigned, true>
     68 LimitFPPrecision("limit-float-precision",
     69                  cl::desc("Generate low-precision inline sequences "
     70                           "for some float libcalls"),
     71                  cl::location(LimitFloatPrecision),
     72                  cl::init(0));
     73 
     74 // Limit the width of DAG chains. This is important in general to prevent
     75 // prevent DAG-based analysis from blowing up. For example, alias analysis and
     76 // load clustering may not complete in reasonable time. It is difficult to
     77 // recognize and avoid this situation within each individual analysis, and
     78 // future analyses are likely to have the same behavior. Limiting DAG width is
     79 // the safe approach, and will be especially important with global DAGs.
     80 //
     81 // MaxParallelChains default is arbitrarily high to avoid affecting
     82 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
     83 // sequence over this should have been converted to llvm.memcpy by the
     84 // frontend. It easy to induce this behavior with .ll code such as:
     85 // %buffer = alloca [4096 x i8]
     86 // %data = load [4096 x i8]* %argPtr
     87 // store [4096 x i8] %data, [4096 x i8]* %buffer
     88 static const unsigned MaxParallelChains = 64;
     89 
     90 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
     91                                       const SDValue *Parts, unsigned NumParts,
     92                                       MVT PartVT, EVT ValueVT, const Value *V);
     93 
     94 /// getCopyFromParts - Create a value that contains the specified legal parts
     95 /// combined into the value they represent.  If the parts combine to a type
     96 /// larger then ValueVT then AssertOp can be used to specify whether the extra
     97 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
     98 /// (ISD::AssertSext).
     99 static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
    100                                 const SDValue *Parts,
    101                                 unsigned NumParts, MVT PartVT, EVT ValueVT,
    102                                 const Value *V,
    103                                 ISD::NodeType AssertOp = ISD::DELETED_NODE) {
    104   if (ValueVT.isVector())
    105     return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
    106                                   PartVT, ValueVT, V);
    107 
    108   assert(NumParts > 0 && "No parts to assemble!");
    109   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    110   SDValue Val = Parts[0];
    111 
    112   if (NumParts > 1) {
    113     // Assemble the value from multiple parts.
    114     if (ValueVT.isInteger()) {
    115       unsigned PartBits = PartVT.getSizeInBits();
    116       unsigned ValueBits = ValueVT.getSizeInBits();
    117 
    118       // Assemble the power of 2 part.
    119       unsigned RoundParts = NumParts & (NumParts - 1) ?
    120         1 << Log2_32(NumParts) : NumParts;
    121       unsigned RoundBits = PartBits * RoundParts;
    122       EVT RoundVT = RoundBits == ValueBits ?
    123         ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
    124       SDValue Lo, Hi;
    125 
    126       EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
    127 
    128       if (RoundParts > 2) {
    129         Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
    130                               PartVT, HalfVT, V);
    131         Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
    132                               RoundParts / 2, PartVT, HalfVT, V);
    133       } else {
    134         Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
    135         Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
    136       }
    137 
    138       if (TLI.isBigEndian())
    139         std::swap(Lo, Hi);
    140 
    141       Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
    142 
    143       if (RoundParts < NumParts) {
    144         // Assemble the trailing non-power-of-2 part.
    145         unsigned OddParts = NumParts - RoundParts;
    146         EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
    147         Hi = getCopyFromParts(DAG, DL,
    148                               Parts + RoundParts, OddParts, PartVT, OddVT, V);
    149 
    150         // Combine the round and odd parts.
    151         Lo = Val;
    152         if (TLI.isBigEndian())
    153           std::swap(Lo, Hi);
    154         EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    155         Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
    156         Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
    157                          DAG.getConstant(Lo.getValueType().getSizeInBits(),
    158                                          TLI.getPointerTy()));
    159         Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
    160         Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
    161       }
    162     } else if (PartVT.isFloatingPoint()) {
    163       // FP split into multiple FP parts (for ppcf128)
    164       assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
    165              "Unexpected split");
    166       SDValue Lo, Hi;
    167       Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
    168       Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
    169       if (TLI.isBigEndian())
    170         std::swap(Lo, Hi);
    171       Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    172     } else {
    173       // FP split into integer parts (soft fp)
    174       assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
    175              !PartVT.isVector() && "Unexpected split");
    176       EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    177       Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
    178     }
    179   }
    180 
    181   // There is now one part, held in Val.  Correct it to match ValueVT.
    182   EVT PartEVT = Val.getValueType();
    183 
    184   if (PartEVT == ValueVT)
    185     return Val;
    186 
    187   if (PartEVT.isInteger() && ValueVT.isInteger()) {
    188     if (ValueVT.bitsLT(PartEVT)) {
    189       // For a truncate, see if we have any information to
    190       // indicate whether the truncated bits will always be
    191       // zero or sign-extension.
    192       if (AssertOp != ISD::DELETED_NODE)
    193         Val = DAG.getNode(AssertOp, DL, PartEVT, Val,
    194                           DAG.getValueType(ValueVT));
    195       return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    196     }
    197     return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
    198   }
    199 
    200   if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    201     // FP_ROUND's are always exact here.
    202     if (ValueVT.bitsLT(Val.getValueType()))
    203       return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val,
    204                          DAG.getTargetConstant(1, TLI.getPointerTy()));
    205 
    206     return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
    207   }
    208 
    209   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    210     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    211 
    212   llvm_unreachable("Unknown mismatch!");
    213 }
    214 
    215 /// getCopyFromPartsVector - Create a value that contains the specified legal
    216 /// parts combined into the value they represent.  If the parts combine to a
    217 /// type larger then ValueVT then AssertOp can be used to specify whether the
    218 /// extra bits are known to be zero (ISD::AssertZext) or sign extended from
    219 /// ValueVT (ISD::AssertSext).
    220 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
    221                                       const SDValue *Parts, unsigned NumParts,
    222                                       MVT PartVT, EVT ValueVT, const Value *V) {
    223   assert(ValueVT.isVector() && "Not a vector value");
    224   assert(NumParts > 0 && "No parts to assemble!");
    225   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    226   SDValue Val = Parts[0];
    227 
    228   // Handle a multi-element vector.
    229   if (NumParts > 1) {
    230     EVT IntermediateVT;
    231     MVT RegisterVT;
    232     unsigned NumIntermediates;
    233     unsigned NumRegs =
    234     TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
    235                                NumIntermediates, RegisterVT);
    236     assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    237     NumParts = NumRegs; // Silence a compiler warning.
    238     assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    239     assert(RegisterVT == Parts[0].getSimpleValueType() &&
    240            "Part type doesn't match part!");
    241 
    242     // Assemble the parts into intermediate operands.
    243     SmallVector<SDValue, 8> Ops(NumIntermediates);
    244     if (NumIntermediates == NumParts) {
    245       // If the register was not expanded, truncate or copy the value,
    246       // as appropriate.
    247       for (unsigned i = 0; i != NumParts; ++i)
    248         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
    249                                   PartVT, IntermediateVT, V);
    250     } else if (NumParts > 0) {
    251       // If the intermediate type was expanded, build the intermediate
    252       // operands from the parts.
    253       assert(NumParts % NumIntermediates == 0 &&
    254              "Must expand into a divisible number of parts!");
    255       unsigned Factor = NumParts / NumIntermediates;
    256       for (unsigned i = 0; i != NumIntermediates; ++i)
    257         Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
    258                                   PartVT, IntermediateVT, V);
    259     }
    260 
    261     // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    262     // intermediate operands.
    263     Val = DAG.getNode(IntermediateVT.isVector() ?
    264                       ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, DL,
    265                       ValueVT, &Ops[0], NumIntermediates);
    266   }
    267 
    268   // There is now one part, held in Val.  Correct it to match ValueVT.
    269   EVT PartEVT = Val.getValueType();
    270 
    271   if (PartEVT == ValueVT)
    272     return Val;
    273 
    274   if (PartEVT.isVector()) {
    275     // If the element type of the source/dest vectors are the same, but the
    276     // parts vector has more elements than the value vector, then we have a
    277     // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    278     // elements we want.
    279     if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
    280       assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
    281              "Cannot narrow, it would be a lossy transformation");
    282       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
    283                          DAG.getIntPtrConstant(0));
    284     }
    285 
    286     // Vector/Vector bitcast.
    287     if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
    288       return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    289 
    290     assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
    291       "Cannot handle this kind of promotion");
    292     // Promoted vector extract
    293     bool Smaller = ValueVT.bitsLE(PartEVT);
    294     return DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
    295                        DL, ValueVT, Val);
    296 
    297   }
    298 
    299   // Trivial bitcast if the types are the same size and the destination
    300   // vector type is legal.
    301   if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
    302       TLI.isTypeLegal(ValueVT))
    303     return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    304 
    305   // Handle cases such as i8 -> <1 x i1>
    306   if (ValueVT.getVectorNumElements() != 1) {
    307     LLVMContext &Ctx = *DAG.getContext();
    308     Twine ErrMsg("non-trivial scalar-to-vector conversion");
    309     if (const Instruction *I = dyn_cast_or_null<Instruction>(V)) {
    310       if (const CallInst *CI = dyn_cast<CallInst>(I))
    311         if (isa<InlineAsm>(CI->getCalledValue()))
    312           ErrMsg = ErrMsg + ", possible invalid constraint for vector type";
    313       Ctx.emitError(I, ErrMsg);
    314     } else {
    315       Ctx.emitError(ErrMsg);
    316     }
    317     report_fatal_error("Cannot handle scalar-to-vector conversion!");
    318   }
    319 
    320   if (ValueVT.getVectorNumElements() == 1 &&
    321       ValueVT.getVectorElementType() != PartEVT) {
    322     bool Smaller = ValueVT.bitsLE(PartEVT);
    323     Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
    324                        DL, ValueVT.getScalarType(), Val);
    325   }
    326 
    327   return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
    328 }
    329 
    330 static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc dl,
    331                                  SDValue Val, SDValue *Parts, unsigned NumParts,
    332                                  MVT PartVT, const Value *V);
    333 
    334 /// getCopyToParts - Create a series of nodes that contain the specified value
    335 /// split into legal parts.  If the parts contain more bits than Val, then, for
    336 /// integers, ExtendKind can be used to specify how to generate the extra bits.
    337 static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
    338                            SDValue Val, SDValue *Parts, unsigned NumParts,
    339                            MVT PartVT, const Value *V,
    340                            ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
    341   EVT ValueVT = Val.getValueType();
    342 
    343   // Handle the vector case separately.
    344   if (ValueVT.isVector())
    345     return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);
    346 
    347   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    348   unsigned PartBits = PartVT.getSizeInBits();
    349   unsigned OrigNumParts = NumParts;
    350   assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
    351 
    352   if (NumParts == 0)
    353     return;
    354 
    355   assert(!ValueVT.isVector() && "Vector case handled elsewhere");
    356   EVT PartEVT = PartVT;
    357   if (PartEVT == ValueVT) {
    358     assert(NumParts == 1 && "No-op copy with multiple parts!");
    359     Parts[0] = Val;
    360     return;
    361   }
    362 
    363   if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    364     // If the parts cover more bits than the value has, promote the value.
    365     if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    366       assert(NumParts == 1 && "Do not know what to promote to!");
    367       Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    368     } else {
    369       assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
    370              ValueVT.isInteger() &&
    371              "Unknown mismatch!");
    372       ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    373       Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
    374       if (PartVT == MVT::x86mmx)
    375         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    376     }
    377   } else if (PartBits == ValueVT.getSizeInBits()) {
    378     // Different types of the same size.
    379     assert(NumParts == 1 && PartEVT != ValueVT);
    380     Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    381   } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    382     // If the parts cover less bits than value has, truncate the value.
    383     assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
    384            ValueVT.isInteger() &&
    385            "Unknown mismatch!");
    386     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    387     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    388     if (PartVT == MVT::x86mmx)
    389       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    390   }
    391 
    392   // The value may have changed - recompute ValueVT.
    393   ValueVT = Val.getValueType();
    394   assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
    395          "Failed to tile the value with PartVT!");
    396 
    397   if (NumParts == 1) {
    398     if (PartEVT != ValueVT) {
    399       LLVMContext &Ctx = *DAG.getContext();
    400       Twine ErrMsg("scalar-to-vector conversion failed");
    401       if (const Instruction *I = dyn_cast_or_null<Instruction>(V)) {
    402         if (const CallInst *CI = dyn_cast<CallInst>(I))
    403           if (isa<InlineAsm>(CI->getCalledValue()))
    404             ErrMsg = ErrMsg + ", possible invalid constraint for vector type";
    405         Ctx.emitError(I, ErrMsg);
    406       } else {
    407         Ctx.emitError(ErrMsg);
    408       }
    409     }
    410 
    411     Parts[0] = Val;
    412     return;
    413   }
    414 
    415   // Expand the value into multiple parts.
    416   if (NumParts & (NumParts - 1)) {
    417     // The number of parts is not a power of 2.  Split off and copy the tail.
    418     assert(PartVT.isInteger() && ValueVT.isInteger() &&
    419            "Do not know what to expand to!");
    420     unsigned RoundParts = 1 << Log2_32(NumParts);
    421     unsigned RoundBits = RoundParts * PartBits;
    422     unsigned OddParts = NumParts - RoundParts;
    423     SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
    424                                  DAG.getIntPtrConstant(RoundBits));
    425     getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);
    426 
    427     if (TLI.isBigEndian())
    428       // The odd parts were reversed by getCopyToParts - unreverse them.
    429       std::reverse(Parts + RoundParts, Parts + NumParts);
    430 
    431     NumParts = RoundParts;
    432     ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    433     Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    434   }
    435 
    436   // The number of parts is a power of 2.  Repeatedly bisect the value using
    437   // EXTRACT_ELEMENT.
    438   Parts[0] = DAG.getNode(ISD::BITCAST, DL,
    439                          EVT::getIntegerVT(*DAG.getContext(),
    440                                            ValueVT.getSizeInBits()),
    441                          Val);
    442 
    443   for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    444     for (unsigned i = 0; i < NumParts; i += StepSize) {
    445       unsigned ThisBits = StepSize * PartBits / 2;
    446       EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
    447       SDValue &Part0 = Parts[i];
    448       SDValue &Part1 = Parts[i+StepSize/2];
    449 
    450       Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
    451                           ThisVT, Part0, DAG.getIntPtrConstant(1));
    452       Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
    453                           ThisVT, Part0, DAG.getIntPtrConstant(0));
    454 
    455       if (ThisBits == PartBits && ThisVT != PartVT) {
    456         Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
    457         Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
    458       }
    459     }
    460   }
    461 
    462   if (TLI.isBigEndian())
    463     std::reverse(Parts, Parts + OrigNumParts);
    464 }
    465 
    466 
    467 /// getCopyToPartsVector - Create a series of nodes that contain the specified
    468 /// value split into legal parts.
    469 static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
    470                                  SDValue Val, SDValue *Parts, unsigned NumParts,
    471                                  MVT PartVT, const Value *V) {
    472   EVT ValueVT = Val.getValueType();
    473   assert(ValueVT.isVector() && "Not a vector");
    474   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    475 
    476   if (NumParts == 1) {
    477     EVT PartEVT = PartVT;
    478     if (PartEVT == ValueVT) {
    479       // Nothing to do.
    480     } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
    481       // Bitconvert vector->vector case.
    482       Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    483     } else if (PartVT.isVector() &&
    484                PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
    485                PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
    486       EVT ElementVT = PartVT.getVectorElementType();
    487       // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
    488       // undef elements.
    489       SmallVector<SDValue, 16> Ops;
    490       for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
    491         Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
    492                                   ElementVT, Val, DAG.getIntPtrConstant(i)));
    493 
    494       for (unsigned i = ValueVT.getVectorNumElements(),
    495            e = PartVT.getVectorNumElements(); i != e; ++i)
    496         Ops.push_back(DAG.getUNDEF(ElementVT));
    497 
    498       Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, &Ops[0], Ops.size());
    499 
    500       // FIXME: Use CONCAT for 2x -> 4x.
    501 
    502       //SDValue UndefElts = DAG.getUNDEF(VectorTy);
    503       //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
    504     } else if (PartVT.isVector() &&
    505                PartEVT.getVectorElementType().bitsGE(
    506                  ValueVT.getVectorElementType()) &&
    507                PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
    508 
    509       // Promoted vector extract
    510       bool Smaller = PartEVT.bitsLE(ValueVT);
    511       Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
    512                         DL, PartVT, Val);
    513     } else{
    514       // Vector -> scalar conversion.
    515       assert(ValueVT.getVectorNumElements() == 1 &&
    516              "Only trivial vector-to-scalar conversions should get here!");
    517       Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
    518                         PartVT, Val, DAG.getIntPtrConstant(0));
    519 
    520       bool Smaller = ValueVT.bitsLE(PartVT);
    521       Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
    522                          DL, PartVT, Val);
    523     }
    524 
    525     Parts[0] = Val;
    526     return;
    527   }
    528 
    529   // Handle a multi-element vector.
    530   EVT IntermediateVT;
    531   MVT RegisterVT;
    532   unsigned NumIntermediates;
    533   unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
    534                                                 IntermediateVT,
    535                                                 NumIntermediates, RegisterVT);
    536   unsigned NumElements = ValueVT.getVectorNumElements();
    537 
    538   assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    539   NumParts = NumRegs; // Silence a compiler warning.
    540   assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    541 
    542   // Split the vector into intermediate operands.
    543   SmallVector<SDValue, 8> Ops(NumIntermediates);
    544   for (unsigned i = 0; i != NumIntermediates; ++i) {
    545     if (IntermediateVT.isVector())
    546       Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
    547                            IntermediateVT, Val,
    548                    DAG.getIntPtrConstant(i * (NumElements / NumIntermediates)));
    549     else
    550       Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
    551                            IntermediateVT, Val, DAG.getIntPtrConstant(i));
    552   }
    553 
    554   // Split the intermediate operands into legal parts.
    555   if (NumParts == NumIntermediates) {
    556     // If the register was not expanded, promote or copy the value,
    557     // as appropriate.
    558     for (unsigned i = 0; i != NumParts; ++i)
    559       getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
    560   } else if (NumParts > 0) {
    561     // If the intermediate type was expanded, split each the value into
    562     // legal parts.
    563     assert(NumParts % NumIntermediates == 0 &&
    564            "Must expand into a divisible number of parts!");
    565     unsigned Factor = NumParts / NumIntermediates;
    566     for (unsigned i = 0; i != NumIntermediates; ++i)
    567       getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
    568   }
    569 }
    570 
    571 namespace {
    572   /// RegsForValue - This struct represents the registers (physical or virtual)
    573   /// that a particular set of values is assigned, and the type information
    574   /// about the value. The most common situation is to represent one value at a
    575   /// time, but struct or array values are handled element-wise as multiple
    576   /// values.  The splitting of aggregates is performed recursively, so that we
    577   /// never have aggregate-typed registers. The values at this point do not
    578   /// necessarily have legal types, so each value may require one or more
    579   /// registers of some legal type.
    580   ///
    581   struct RegsForValue {
    582     /// ValueVTs - The value types of the values, which may not be legal, and
    583     /// may need be promoted or synthesized from one or more registers.
    584     ///
    585     SmallVector<EVT, 4> ValueVTs;
    586 
    587     /// RegVTs - The value types of the registers. This is the same size as
    588     /// ValueVTs and it records, for each value, what the type of the assigned
    589     /// register or registers are. (Individual values are never synthesized
    590     /// from more than one type of register.)
    591     ///
    592     /// With virtual registers, the contents of RegVTs is redundant with TLI's
    593     /// getRegisterType member function, however when with physical registers
    594     /// it is necessary to have a separate record of the types.
    595     ///
    596     SmallVector<MVT, 4> RegVTs;
    597 
    598     /// Regs - This list holds the registers assigned to the values.
    599     /// Each legal or promoted value requires one register, and each
    600     /// expanded value requires multiple registers.
    601     ///
    602     SmallVector<unsigned, 4> Regs;
    603 
    604     RegsForValue() {}
    605 
    606     RegsForValue(const SmallVector<unsigned, 4> &regs,
    607                  MVT regvt, EVT valuevt)
    608       : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
    609 
    610     RegsForValue(LLVMContext &Context, const TargetLowering &tli,
    611                  unsigned Reg, Type *Ty) {
    612       ComputeValueVTs(tli, Ty, ValueVTs);
    613 
    614       for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    615         EVT ValueVT = ValueVTs[Value];
    616         unsigned NumRegs = tli.getNumRegisters(Context, ValueVT);
    617         MVT RegisterVT = tli.getRegisterType(Context, ValueVT);
    618         for (unsigned i = 0; i != NumRegs; ++i)
    619           Regs.push_back(Reg + i);
    620         RegVTs.push_back(RegisterVT);
    621         Reg += NumRegs;
    622       }
    623     }
    624 
    625     /// areValueTypesLegal - Return true if types of all the values are legal.
    626     bool areValueTypesLegal(const TargetLowering &TLI) {
    627       for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    628         MVT RegisterVT = RegVTs[Value];
    629         if (!TLI.isTypeLegal(RegisterVT))
    630           return false;
    631       }
    632       return true;
    633     }
    634 
    635     /// append - Add the specified values to this one.
    636     void append(const RegsForValue &RHS) {
    637       ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    638       RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    639       Regs.append(RHS.Regs.begin(), RHS.Regs.end());
    640     }
    641 
    642     /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
    643     /// this value and returns the result as a ValueVTs value.  This uses
    644     /// Chain/Flag as the input and updates them for the output Chain/Flag.
    645     /// If the Flag pointer is NULL, no flag is used.
    646     SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
    647                             DebugLoc dl,
    648                             SDValue &Chain, SDValue *Flag,
    649                             const Value *V = 0) const;
    650 
    651     /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
    652     /// specified value into the registers specified by this object.  This uses
    653     /// Chain/Flag as the input and updates them for the output Chain/Flag.
    654     /// If the Flag pointer is NULL, no flag is used.
    655     void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
    656                        SDValue &Chain, SDValue *Flag, const Value *V) const;
    657 
    658     /// AddInlineAsmOperands - Add this value to the specified inlineasm node
    659     /// operand list.  This adds the code marker, matching input operand index
    660     /// (if applicable), and includes the number of values added into it.
    661     void AddInlineAsmOperands(unsigned Kind,
    662                               bool HasMatching, unsigned MatchingIdx,
    663                               SelectionDAG &DAG,
    664                               std::vector<SDValue> &Ops) const;
    665   };
    666 }
    667 
    668 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
    669 /// this value and returns the result as a ValueVT value.  This uses
    670 /// Chain/Flag as the input and updates them for the output Chain/Flag.
    671 /// If the Flag pointer is NULL, no flag is used.
    672 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
    673                                       FunctionLoweringInfo &FuncInfo,
    674                                       DebugLoc dl,
    675                                       SDValue &Chain, SDValue *Flag,
    676                                       const Value *V) const {
    677   // A Value with type {} or [0 x %t] needs no registers.
    678   if (ValueVTs.empty())
    679     return SDValue();
    680 
    681   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    682 
    683   // Assemble the legal parts into the final values.
    684   SmallVector<SDValue, 4> Values(ValueVTs.size());
    685   SmallVector<SDValue, 8> Parts;
    686   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    687     // Copy the legal parts from the registers.
    688     EVT ValueVT = ValueVTs[Value];
    689     unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    690     MVT RegisterVT = RegVTs[Value];
    691 
    692     Parts.resize(NumRegs);
    693     for (unsigned i = 0; i != NumRegs; ++i) {
    694       SDValue P;
    695       if (Flag == 0) {
    696         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
    697       } else {
    698         P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
    699         *Flag = P.getValue(2);
    700       }
    701 
    702       Chain = P.getValue(1);
    703       Parts[i] = P;
    704 
    705       // If the source register was virtual and if we know something about it,
    706       // add an assert node.
    707       if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
    708           !RegisterVT.isInteger() || RegisterVT.isVector())
    709         continue;
    710 
    711       const FunctionLoweringInfo::LiveOutInfo *LOI =
    712         FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
    713       if (!LOI)
    714         continue;
    715 
    716       unsigned RegSize = RegisterVT.getSizeInBits();
    717       unsigned NumSignBits = LOI->NumSignBits;
    718       unsigned NumZeroBits = LOI->KnownZero.countLeadingOnes();
    719 
    720       // FIXME: We capture more information than the dag can represent.  For
    721       // now, just use the tightest assertzext/assertsext possible.
    722       bool isSExt = true;
    723       EVT FromVT(MVT::Other);
    724       if (NumSignBits == RegSize)
    725         isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
    726       else if (NumZeroBits >= RegSize-1)
    727         isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
    728       else if (NumSignBits > RegSize-8)
    729         isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
    730       else if (NumZeroBits >= RegSize-8)
    731         isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
    732       else if (NumSignBits > RegSize-16)
    733         isSExt = true, FromVT = MVT::i16;  // ASSERT SEXT 16
    734       else if (NumZeroBits >= RegSize-16)
    735         isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
    736       else if (NumSignBits > RegSize-32)
    737         isSExt = true, FromVT = MVT::i32;  // ASSERT SEXT 32
    738       else if (NumZeroBits >= RegSize-32)
    739         isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
    740       else
    741         continue;
    742 
    743       // Add an assertion node.
    744       assert(FromVT != MVT::Other);
    745       Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
    746                              RegisterVT, P, DAG.getValueType(FromVT));
    747     }
    748 
    749     Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
    750                                      NumRegs, RegisterVT, ValueVT, V);
    751     Part += NumRegs;
    752     Parts.clear();
    753   }
    754 
    755   return DAG.getNode(ISD::MERGE_VALUES, dl,
    756                      DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
    757                      &Values[0], ValueVTs.size());
    758 }
    759 
    760 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
    761 /// specified value into the registers specified by this object.  This uses
    762 /// Chain/Flag as the input and updates them for the output Chain/Flag.
    763 /// If the Flag pointer is NULL, no flag is used.
    764 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
    765                                  SDValue &Chain, SDValue *Flag,
    766                                  const Value *V) const {
    767   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    768 
    769   // Get the list of the values's legal parts.
    770   unsigned NumRegs = Regs.size();
    771   SmallVector<SDValue, 8> Parts(NumRegs);
    772   for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    773     EVT ValueVT = ValueVTs[Value];
    774     unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    775     MVT RegisterVT = RegVTs[Value];
    776     ISD::NodeType ExtendKind =
    777       TLI.isZExtFree(Val, RegisterVT)? ISD::ZERO_EXTEND: ISD::ANY_EXTEND;
    778 
    779     getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
    780                    &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
    781     Part += NumParts;
    782   }
    783 
    784   // Copy the parts into the registers.
    785   SmallVector<SDValue, 8> Chains(NumRegs);
    786   for (unsigned i = 0; i != NumRegs; ++i) {
    787     SDValue Part;
    788     if (Flag == 0) {
    789       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    790     } else {
    791       Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
    792       *Flag = Part.getValue(1);
    793     }
    794 
    795     Chains[i] = Part.getValue(0);
    796   }
    797 
    798   if (NumRegs == 1 || Flag)
    799     // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    800     // flagged to it. That is the CopyToReg nodes and the user are considered
    801     // a single scheduling unit. If we create a TokenFactor and return it as
    802     // chain, then the TokenFactor is both a predecessor (operand) of the
    803     // user as well as a successor (the TF operands are flagged to the user).
    804     // c1, f1 = CopyToReg
    805     // c2, f2 = CopyToReg
    806     // c3     = TokenFactor c1, c2
    807     // ...
    808     //        = op c3, ..., f2
    809     Chain = Chains[NumRegs-1];
    810   else
    811     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
    812 }
    813 
    814 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
    815 /// operand list.  This adds the code marker and includes the number of
    816 /// values added into it.
    817 void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
    818                                         unsigned MatchingIdx,
    819                                         SelectionDAG &DAG,
    820                                         std::vector<SDValue> &Ops) const {
    821   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    822 
    823   unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
    824   if (HasMatching)
    825     Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
    826   else if (!Regs.empty() &&
    827            TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    828     // Put the register class of the virtual registers in the flag word.  That
    829     // way, later passes can recompute register class constraints for inline
    830     // assembly as well as normal instructions.
    831     // Don't do this for tied operands that can use the regclass information
    832     // from the def.
    833     const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    834     const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    835     Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
    836   }
    837 
    838   SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
    839   Ops.push_back(Res);
    840 
    841   for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    842     unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    843     MVT RegisterVT = RegVTs[Value];
    844     for (unsigned i = 0; i != NumRegs; ++i) {
    845       assert(Reg < Regs.size() && "Mismatch in # registers expected");
    846       Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
    847     }
    848   }
    849 }
    850 
    851 void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
    852                                const TargetLibraryInfo *li) {
    853   AA = &aa;
    854   GFI = gfi;
    855   LibInfo = li;
    856   TD = DAG.getTarget().getDataLayout();
    857   Context = DAG.getContext();
    858   LPadToCallSiteMap.clear();
    859 }
    860 
    861 /// clear - Clear out the current SelectionDAG and the associated
    862 /// state and prepare this SelectionDAGBuilder object to be used
    863 /// for a new block. This doesn't clear out information about
    864 /// additional blocks that are needed to complete switch lowering
    865 /// or PHI node updating; that information is cleared out as it is
    866 /// consumed.
    867 void SelectionDAGBuilder::clear() {
    868   NodeMap.clear();
    869   UnusedArgNodeMap.clear();
    870   PendingLoads.clear();
    871   PendingExports.clear();
    872   CurDebugLoc = DebugLoc();
    873   HasTailCall = false;
    874 }
    875 
    876 /// clearDanglingDebugInfo - Clear the dangling debug information
    877 /// map. This function is separated from the clear so that debug
    878 /// information that is dangling in a basic block can be properly
    879 /// resolved in a different basic block. This allows the
    880 /// SelectionDAG to resolve dangling debug information attached
    881 /// to PHI nodes.
    882 void SelectionDAGBuilder::clearDanglingDebugInfo() {
    883   DanglingDebugInfoMap.clear();
    884 }
    885 
    886 /// getRoot - Return the current virtual root of the Selection DAG,
    887 /// flushing any PendingLoad items. This must be done before emitting
    888 /// a store or any other node that may need to be ordered after any
    889 /// prior load instructions.
    890 ///
    891 SDValue SelectionDAGBuilder::getRoot() {
    892   if (PendingLoads.empty())
    893     return DAG.getRoot();
    894 
    895   if (PendingLoads.size() == 1) {
    896     SDValue Root = PendingLoads[0];
    897     DAG.setRoot(Root);
    898     PendingLoads.clear();
    899     return Root;
    900   }
    901 
    902   // Otherwise, we have to make a token factor node.
    903   SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
    904                                &PendingLoads[0], PendingLoads.size());
    905   PendingLoads.clear();
    906   DAG.setRoot(Root);
    907   return Root;
    908 }
    909 
    910 /// getControlRoot - Similar to getRoot, but instead of flushing all the
    911 /// PendingLoad items, flush all the PendingExports items. It is necessary
    912 /// to do this before emitting a terminator instruction.
    913 ///
    914 SDValue SelectionDAGBuilder::getControlRoot() {
    915   SDValue Root = DAG.getRoot();
    916 
    917   if (PendingExports.empty())
    918     return Root;
    919 
    920   // Turn all of the CopyToReg chains into one factored node.
    921   if (Root.getOpcode() != ISD::EntryToken) {
    922     unsigned i = 0, e = PendingExports.size();
    923     for (; i != e; ++i) {
    924       assert(PendingExports[i].getNode()->getNumOperands() > 1);
    925       if (PendingExports[i].getNode()->getOperand(0) == Root)
    926         break;  // Don't add the root if we already indirectly depend on it.
    927     }
    928 
    929     if (i == e)
    930       PendingExports.push_back(Root);
    931   }
    932 
    933   Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
    934                      &PendingExports[0],
    935                      PendingExports.size());
    936   PendingExports.clear();
    937   DAG.setRoot(Root);
    938   return Root;
    939 }
    940 
    941 void SelectionDAGBuilder::AssignOrderingToNode(const SDNode *Node) {
    942   if (DAG.GetOrdering(Node) != 0) return; // Already has ordering.
    943   DAG.AssignOrdering(Node, SDNodeOrder);
    944 
    945   for (unsigned I = 0, E = Node->getNumOperands(); I != E; ++I)
    946     AssignOrderingToNode(Node->getOperand(I).getNode());
    947 }
    948 
    949 void SelectionDAGBuilder::visit(const Instruction &I) {
    950   // Set up outgoing PHI node register values before emitting the terminator.
    951   if (isa<TerminatorInst>(&I))
    952     HandlePHINodesInSuccessorBlocks(I.getParent());
    953 
    954   CurDebugLoc = I.getDebugLoc();
    955 
    956   visit(I.getOpcode(), I);
    957 
    958   if (!isa<TerminatorInst>(&I) && !HasTailCall)
    959     CopyToExportRegsIfNeeded(&I);
    960 
    961   CurDebugLoc = DebugLoc();
    962 }
    963 
    964 void SelectionDAGBuilder::visitPHI(const PHINode &) {
    965   llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
    966 }
    967 
    968 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
    969   // Note: this doesn't use InstVisitor, because it has to work with
    970   // ConstantExpr's in addition to instructions.
    971   switch (Opcode) {
    972   default: llvm_unreachable("Unknown instruction type encountered!");
    973     // Build the switch statement using the Instruction.def file.
    974 #define HANDLE_INST(NUM, OPCODE, CLASS) \
    975     case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
    976 #include "llvm/IR/Instruction.def"
    977   }
    978 
    979   // Assign the ordering to the freshly created DAG nodes.
    980   if (NodeMap.count(&I)) {
    981     ++SDNodeOrder;
    982     AssignOrderingToNode(getValue(&I).getNode());
    983   }
    984 }
    985 
    986 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
    987 // generate the debug data structures now that we've seen its definition.
    988 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
    989                                                    SDValue Val) {
    990   DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
    991   if (DDI.getDI()) {
    992     const DbgValueInst *DI = DDI.getDI();
    993     DebugLoc dl = DDI.getdl();
    994     unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    995     MDNode *Variable = DI->getVariable();
    996     uint64_t Offset = DI->getOffset();
    997     SDDbgValue *SDV;
    998     if (Val.getNode()) {
    999       if (!EmitFuncArgumentDbgValue(V, Variable, Offset, Val)) {
   1000         SDV = DAG.getDbgValue(Variable, Val.getNode(),
   1001                               Val.getResNo(), Offset, dl, DbgSDNodeOrder);
   1002         DAG.AddDbgValue(SDV, Val.getNode(), false);
   1003       }
   1004     } else
   1005       DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
   1006     DanglingDebugInfoMap[V] = DanglingDebugInfo();
   1007   }
   1008 }
   1009 
   1010 /// getValue - Return an SDValue for the given Value.
   1011 SDValue SelectionDAGBuilder::getValue(const Value *V) {
   1012   // If we already have an SDValue for this value, use it. It's important
   1013   // to do this first, so that we don't create a CopyFromReg if we already
   1014   // have a regular SDValue.
   1015   SDValue &N = NodeMap[V];
   1016   if (N.getNode()) return N;
   1017 
   1018   // If there's a virtual register allocated and initialized for this
   1019   // value, use it.
   1020   DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
   1021   if (It != FuncInfo.ValueMap.end()) {
   1022     unsigned InReg = It->second;
   1023     RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
   1024     SDValue Chain = DAG.getEntryNode();
   1025     N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL, V);
   1026     resolveDanglingDebugInfo(V, N);
   1027     return N;
   1028   }
   1029 
   1030   // Otherwise create a new SDValue and remember it.
   1031   SDValue Val = getValueImpl(V);
   1032   NodeMap[V] = Val;
   1033   resolveDanglingDebugInfo(V, Val);
   1034   return Val;
   1035 }
   1036 
   1037 /// getNonRegisterValue - Return an SDValue for the given Value, but
   1038 /// don't look in FuncInfo.ValueMap for a virtual register.
   1039 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
   1040   // If we already have an SDValue for this value, use it.
   1041   SDValue &N = NodeMap[V];
   1042   if (N.getNode()) return N;
   1043 
   1044   // Otherwise create a new SDValue and remember it.
   1045   SDValue Val = getValueImpl(V);
   1046   NodeMap[V] = Val;
   1047   resolveDanglingDebugInfo(V, Val);
   1048   return Val;
   1049 }
   1050 
   1051 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
   1052 /// Create an SDValue for the given value.
   1053 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
   1054   if (const Constant *C = dyn_cast<Constant>(V)) {
   1055     EVT VT = TLI.getValueType(V->getType(), true);
   1056 
   1057     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
   1058       return DAG.getConstant(*CI, VT);
   1059 
   1060     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
   1061       return DAG.getGlobalAddress(GV, getCurDebugLoc(), VT);
   1062 
   1063     if (isa<ConstantPointerNull>(C))
   1064       return DAG.getConstant(0, TLI.getPointerTy());
   1065 
   1066     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
   1067       return DAG.getConstantFP(*CFP, VT);
   1068 
   1069     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
   1070       return DAG.getUNDEF(VT);
   1071 
   1072     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
   1073       visit(CE->getOpcode(), *CE);
   1074       SDValue N1 = NodeMap[V];
   1075       assert(N1.getNode() && "visit didn't populate the NodeMap!");
   1076       return N1;
   1077     }
   1078 
   1079     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
   1080       SmallVector<SDValue, 4> Constants;
   1081       for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
   1082            OI != OE; ++OI) {
   1083         SDNode *Val = getValue(*OI).getNode();
   1084         // If the operand is an empty aggregate, there are no values.
   1085         if (!Val) continue;
   1086         // Add each leaf value from the operand to the Constants list
   1087         // to form a flattened list of all the values.
   1088         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
   1089           Constants.push_back(SDValue(Val, i));
   1090       }
   1091 
   1092       return DAG.getMergeValues(&Constants[0], Constants.size(),
   1093                                 getCurDebugLoc());
   1094     }
   1095 
   1096     if (const ConstantDataSequential *CDS =
   1097           dyn_cast<ConstantDataSequential>(C)) {
   1098       SmallVector<SDValue, 4> Ops;
   1099       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
   1100         SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
   1101         // Add each leaf value from the operand to the Constants list
   1102         // to form a flattened list of all the values.
   1103         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
   1104           Ops.push_back(SDValue(Val, i));
   1105       }
   1106 
   1107       if (isa<ArrayType>(CDS->getType()))
   1108         return DAG.getMergeValues(&Ops[0], Ops.size(), getCurDebugLoc());
   1109       return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
   1110                                       VT, &Ops[0], Ops.size());
   1111     }
   1112 
   1113     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
   1114       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
   1115              "Unknown struct or array constant!");
   1116 
   1117       SmallVector<EVT, 4> ValueVTs;
   1118       ComputeValueVTs(TLI, C->getType(), ValueVTs);
   1119       unsigned NumElts = ValueVTs.size();
   1120       if (NumElts == 0)
   1121         return SDValue(); // empty struct
   1122       SmallVector<SDValue, 4> Constants(NumElts);
   1123       for (unsigned i = 0; i != NumElts; ++i) {
   1124         EVT EltVT = ValueVTs[i];
   1125         if (isa<UndefValue>(C))
   1126           Constants[i] = DAG.getUNDEF(EltVT);
   1127         else if (EltVT.isFloatingPoint())
   1128           Constants[i] = DAG.getConstantFP(0, EltVT);
   1129         else
   1130           Constants[i] = DAG.getConstant(0, EltVT);
   1131       }
   1132 
   1133       return DAG.getMergeValues(&Constants[0], NumElts,
   1134                                 getCurDebugLoc());
   1135     }
   1136 
   1137     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
   1138       return DAG.getBlockAddress(BA, VT);
   1139 
   1140     VectorType *VecTy = cast<VectorType>(V->getType());
   1141     unsigned NumElements = VecTy->getNumElements();
   1142 
   1143     // Now that we know the number and type of the elements, get that number of
   1144     // elements into the Ops array based on what kind of constant it is.
   1145     SmallVector<SDValue, 16> Ops;
   1146     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
   1147       for (unsigned i = 0; i != NumElements; ++i)
   1148         Ops.push_back(getValue(CV->getOperand(i)));
   1149     } else {
   1150       assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
   1151       EVT EltVT = TLI.getValueType(VecTy->getElementType());
   1152 
   1153       SDValue Op;
   1154       if (EltVT.isFloatingPoint())
   1155         Op = DAG.getConstantFP(0, EltVT);
   1156       else
   1157         Op = DAG.getConstant(0, EltVT);
   1158       Ops.assign(NumElements, Op);
   1159     }
   1160 
   1161     // Create a BUILD_VECTOR node.
   1162     return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
   1163                                     VT, &Ops[0], Ops.size());
   1164   }
   1165 
   1166   // If this is a static alloca, generate it as the frameindex instead of
   1167   // computation.
   1168   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
   1169     DenseMap<const AllocaInst*, int>::iterator SI =
   1170       FuncInfo.StaticAllocaMap.find(AI);
   1171     if (SI != FuncInfo.StaticAllocaMap.end())
   1172       return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
   1173   }
   1174 
   1175   // If this is an instruction which fast-isel has deferred, select it now.
   1176   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
   1177     unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
   1178     RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType());
   1179     SDValue Chain = DAG.getEntryNode();
   1180     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL, V);
   1181   }
   1182 
   1183   llvm_unreachable("Can't get register for value!");
   1184 }
   1185 
   1186 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
   1187   SDValue Chain = getControlRoot();
   1188   SmallVector<ISD::OutputArg, 8> Outs;
   1189   SmallVector<SDValue, 8> OutVals;
   1190 
   1191   if (!FuncInfo.CanLowerReturn) {
   1192     unsigned DemoteReg = FuncInfo.DemoteRegister;
   1193     const Function *F = I.getParent()->getParent();
   1194 
   1195     // Emit a store of the return value through the virtual register.
   1196     // Leave Outs empty so that LowerReturn won't try to load return
   1197     // registers the usual way.
   1198     SmallVector<EVT, 1> PtrValueVTs;
   1199     ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()),
   1200                     PtrValueVTs);
   1201 
   1202     SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
   1203     SDValue RetOp = getValue(I.getOperand(0));
   1204 
   1205     SmallVector<EVT, 4> ValueVTs;
   1206     SmallVector<uint64_t, 4> Offsets;
   1207     ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
   1208     unsigned NumValues = ValueVTs.size();
   1209 
   1210     SmallVector<SDValue, 4> Chains(NumValues);
   1211     for (unsigned i = 0; i != NumValues; ++i) {
   1212       SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(),
   1213                                 RetPtr.getValueType(), RetPtr,
   1214                                 DAG.getIntPtrConstant(Offsets[i]));
   1215       Chains[i] =
   1216         DAG.getStore(Chain, getCurDebugLoc(),
   1217                      SDValue(RetOp.getNode(), RetOp.getResNo() + i),
   1218                      // FIXME: better loc info would be nice.
   1219                      Add, MachinePointerInfo(), false, false, 0);
   1220     }
   1221 
   1222     Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
   1223                         MVT::Other, &Chains[0], NumValues);
   1224   } else if (I.getNumOperands() != 0) {
   1225     SmallVector<EVT, 4> ValueVTs;
   1226     ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs);
   1227     unsigned NumValues = ValueVTs.size();
   1228     if (NumValues) {
   1229       SDValue RetOp = getValue(I.getOperand(0));
   1230       for (unsigned j = 0, f = NumValues; j != f; ++j) {
   1231         EVT VT = ValueVTs[j];
   1232 
   1233         ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
   1234 
   1235         const Function *F = I.getParent()->getParent();
   1236         if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
   1237                                             Attribute::SExt))
   1238           ExtendKind = ISD::SIGN_EXTEND;
   1239         else if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
   1240                                                  Attribute::ZExt))
   1241           ExtendKind = ISD::ZERO_EXTEND;
   1242 
   1243         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
   1244           VT = TLI.getTypeForExtArgOrReturn(VT.getSimpleVT(), ExtendKind);
   1245 
   1246         unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT);
   1247         MVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT);
   1248         SmallVector<SDValue, 4> Parts(NumParts);
   1249         getCopyToParts(DAG, getCurDebugLoc(),
   1250                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
   1251                        &Parts[0], NumParts, PartVT, &I, ExtendKind);
   1252 
   1253         // 'inreg' on function refers to return value
   1254         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
   1255         if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
   1256                                             Attribute::InReg))
   1257           Flags.setInReg();
   1258 
   1259         // Propagate extension type if any
   1260         if (ExtendKind == ISD::SIGN_EXTEND)
   1261           Flags.setSExt();
   1262         else if (ExtendKind == ISD::ZERO_EXTEND)
   1263           Flags.setZExt();
   1264 
   1265         for (unsigned i = 0; i < NumParts; ++i) {
   1266           Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
   1267                                         /*isfixed=*/true, 0, 0));
   1268           OutVals.push_back(Parts[i]);
   1269         }
   1270       }
   1271     }
   1272   }
   1273 
   1274   bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
   1275   CallingConv::ID CallConv =
   1276     DAG.getMachineFunction().getFunction()->getCallingConv();
   1277   Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
   1278                           Outs, OutVals, getCurDebugLoc(), DAG);
   1279 
   1280   // Verify that the target's LowerReturn behaved as expected.
   1281   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
   1282          "LowerReturn didn't return a valid chain!");
   1283 
   1284   // Update the DAG with the new chain value resulting from return lowering.
   1285   DAG.setRoot(Chain);
   1286 }
   1287 
   1288 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
   1289 /// created for it, emit nodes to copy the value into the virtual
   1290 /// registers.
   1291 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
   1292   // Skip empty types
   1293   if (V->getType()->isEmptyTy())
   1294     return;
   1295 
   1296   DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
   1297   if (VMI != FuncInfo.ValueMap.end()) {
   1298     assert(!V->use_empty() && "Unused value assigned virtual registers!");
   1299     CopyValueToVirtualRegister(V, VMI->second);
   1300   }
   1301 }
   1302 
   1303 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
   1304 /// the current basic block, add it to ValueMap now so that we'll get a
   1305 /// CopyTo/FromReg.
   1306 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
   1307   // No need to export constants.
   1308   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
   1309 
   1310   // Already exported?
   1311   if (FuncInfo.isExportedInst(V)) return;
   1312 
   1313   unsigned Reg = FuncInfo.InitializeRegForValue(V);
   1314   CopyValueToVirtualRegister(V, Reg);
   1315 }
   1316 
   1317 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
   1318                                                      const BasicBlock *FromBB) {
   1319   // The operands of the setcc have to be in this block.  We don't know
   1320   // how to export them from some other block.
   1321   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
   1322     // Can export from current BB.
   1323     if (VI->getParent() == FromBB)
   1324       return true;
   1325 
   1326     // Is already exported, noop.
   1327     return FuncInfo.isExportedInst(V);
   1328   }
   1329 
   1330   // If this is an argument, we can export it if the BB is the entry block or
   1331   // if it is already exported.
   1332   if (isa<Argument>(V)) {
   1333     if (FromBB == &FromBB->getParent()->getEntryBlock())
   1334       return true;
   1335 
   1336     // Otherwise, can only export this if it is already exported.
   1337     return FuncInfo.isExportedInst(V);
   1338   }
   1339 
   1340   // Otherwise, constants can always be exported.
   1341   return true;
   1342 }
   1343 
   1344 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
   1345 uint32_t SelectionDAGBuilder::getEdgeWeight(const MachineBasicBlock *Src,
   1346                                             const MachineBasicBlock *Dst) const {
   1347   BranchProbabilityInfo *BPI = FuncInfo.BPI;
   1348   if (!BPI)
   1349     return 0;
   1350   const BasicBlock *SrcBB = Src->getBasicBlock();
   1351   const BasicBlock *DstBB = Dst->getBasicBlock();
   1352   return BPI->getEdgeWeight(SrcBB, DstBB);
   1353 }
   1354 
   1355 void SelectionDAGBuilder::
   1356 addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst,
   1357                        uint32_t Weight /* = 0 */) {
   1358   if (!Weight)
   1359     Weight = getEdgeWeight(Src, Dst);
   1360   Src->addSuccessor(Dst, Weight);
   1361 }
   1362 
   1363 
   1364 static bool InBlock(const Value *V, const BasicBlock *BB) {
   1365   if (const Instruction *I = dyn_cast<Instruction>(V))
   1366     return I->getParent() == BB;
   1367   return true;
   1368 }
   1369 
   1370 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
   1371 /// This function emits a branch and is used at the leaves of an OR or an
   1372 /// AND operator tree.
   1373 ///
   1374 void
   1375 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
   1376                                                   MachineBasicBlock *TBB,
   1377                                                   MachineBasicBlock *FBB,
   1378                                                   MachineBasicBlock *CurBB,
   1379                                                   MachineBasicBlock *SwitchBB) {
   1380   const BasicBlock *BB = CurBB->getBasicBlock();
   1381 
   1382   // If the leaf of the tree is a comparison, merge the condition into
   1383   // the caseblock.
   1384   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
   1385     // The operands of the cmp have to be in this block.  We don't know
   1386     // how to export them from some other block.  If this is the first block
   1387     // of the sequence, no exporting is needed.
   1388     if (CurBB == SwitchBB ||
   1389         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
   1390          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
   1391       ISD::CondCode Condition;
   1392       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
   1393         Condition = getICmpCondCode(IC->getPredicate());
   1394       } else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
   1395         Condition = getFCmpCondCode(FC->getPredicate());
   1396         if (TM.Options.NoNaNsFPMath)
   1397           Condition = getFCmpCodeWithoutNaN(Condition);
   1398       } else {
   1399         Condition = ISD::SETEQ; // silence warning.
   1400         llvm_unreachable("Unknown compare instruction");
   1401       }
   1402 
   1403       CaseBlock CB(Condition, BOp->getOperand(0),
   1404                    BOp->getOperand(1), NULL, TBB, FBB, CurBB);
   1405       SwitchCases.push_back(CB);
   1406       return;
   1407     }
   1408   }
   1409 
   1410   // Create a CaseBlock record representing this branch.
   1411   CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
   1412                NULL, TBB, FBB, CurBB);
   1413   SwitchCases.push_back(CB);
   1414 }
   1415 
   1416 /// FindMergedConditions - If Cond is an expression like
   1417 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
   1418                                                MachineBasicBlock *TBB,
   1419                                                MachineBasicBlock *FBB,
   1420                                                MachineBasicBlock *CurBB,
   1421                                                MachineBasicBlock *SwitchBB,
   1422                                                unsigned Opc) {
   1423   // If this node is not part of the or/and tree, emit it as a branch.
   1424   const Instruction *BOp = dyn_cast<Instruction>(Cond);
   1425   if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
   1426       (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
   1427       BOp->getParent() != CurBB->getBasicBlock() ||
   1428       !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
   1429       !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
   1430     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB);
   1431     return;
   1432   }
   1433 
   1434   //  Create TmpBB after CurBB.
   1435   MachineFunction::iterator BBI = CurBB;
   1436   MachineFunction &MF = DAG.getMachineFunction();
   1437   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
   1438   CurBB->getParent()->insert(++BBI, TmpBB);
   1439 
   1440   if (Opc == Instruction::Or) {
   1441     // Codegen X | Y as:
   1442     //   jmp_if_X TBB
   1443     //   jmp TmpBB
   1444     // TmpBB:
   1445     //   jmp_if_Y TBB
   1446     //   jmp FBB
   1447     //
   1448 
   1449     // Emit the LHS condition.
   1450     FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc);
   1451 
   1452     // Emit the RHS condition into TmpBB.
   1453     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc);
   1454   } else {
   1455     assert(Opc == Instruction::And && "Unknown merge op!");
   1456     // Codegen X & Y as:
   1457     //   jmp_if_X TmpBB
   1458     //   jmp FBB
   1459     // TmpBB:
   1460     //   jmp_if_Y TBB
   1461     //   jmp FBB
   1462     //
   1463     //  This requires creation of TmpBB after CurBB.
   1464 
   1465     // Emit the LHS condition.
   1466     FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc);
   1467 
   1468     // Emit the RHS condition into TmpBB.
   1469     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc);
   1470   }
   1471 }
   1472 
   1473 /// If the set of cases should be emitted as a series of branches, return true.
   1474 /// If we should emit this as a bunch of and/or'd together conditions, return
   1475 /// false.
   1476 bool
   1477 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
   1478   if (Cases.size() != 2) return true;
   1479 
   1480   // If this is two comparisons of the same values or'd or and'd together, they
   1481   // will get folded into a single comparison, so don't emit two blocks.
   1482   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
   1483        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
   1484       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
   1485        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
   1486     return false;
   1487   }
   1488 
   1489   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
   1490   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
   1491   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
   1492       Cases[0].CC == Cases[1].CC &&
   1493       isa<Constant>(Cases[0].CmpRHS) &&
   1494       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
   1495     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
   1496       return false;
   1497     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
   1498       return false;
   1499   }
   1500 
   1501   return true;
   1502 }
   1503 
   1504 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
   1505   MachineBasicBlock *BrMBB = FuncInfo.MBB;
   1506 
   1507   // Update machine-CFG edges.
   1508   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
   1509 
   1510   // Figure out which block is immediately after the current one.
   1511   MachineBasicBlock *NextBlock = 0;
   1512   MachineFunction::iterator BBI = BrMBB;
   1513   if (++BBI != FuncInfo.MF->end())
   1514     NextBlock = BBI;
   1515 
   1516   if (I.isUnconditional()) {
   1517     // Update machine-CFG edges.
   1518     BrMBB->addSuccessor(Succ0MBB);
   1519 
   1520     // If this is not a fall-through branch, emit the branch.
   1521     if (Succ0MBB != NextBlock)
   1522       DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
   1523                               MVT::Other, getControlRoot(),
   1524                               DAG.getBasicBlock(Succ0MBB)));
   1525 
   1526     return;
   1527   }
   1528 
   1529   // If this condition is one of the special cases we handle, do special stuff
   1530   // now.
   1531   const Value *CondVal = I.getCondition();
   1532   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
   1533 
   1534   // If this is a series of conditions that are or'd or and'd together, emit
   1535   // this as a sequence of branches instead of setcc's with and/or operations.
   1536   // As long as jumps are not expensive, this should improve performance.
   1537   // For example, instead of something like:
   1538   //     cmp A, B
   1539   //     C = seteq
   1540   //     cmp D, E
   1541   //     F = setle
   1542   //     or C, F
   1543   //     jnz foo
   1544   // Emit:
   1545   //     cmp A, B
   1546   //     je foo
   1547   //     cmp D, E
   1548   //     jle foo
   1549   //
   1550   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
   1551     if (!TLI.isJumpExpensive() &&
   1552         BOp->hasOneUse() &&
   1553         (BOp->getOpcode() == Instruction::And ||
   1554          BOp->getOpcode() == Instruction::Or)) {
   1555       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
   1556                            BOp->getOpcode());
   1557       // If the compares in later blocks need to use values not currently
   1558       // exported from this block, export them now.  This block should always
   1559       // be the first entry.
   1560       assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
   1561 
   1562       // Allow some cases to be rejected.
   1563       if (ShouldEmitAsBranches(SwitchCases)) {
   1564         for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
   1565           ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
   1566           ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
   1567         }
   1568 
   1569         // Emit the branch for this block.
   1570         visitSwitchCase(SwitchCases[0], BrMBB);
   1571         SwitchCases.erase(SwitchCases.begin());
   1572         return;
   1573       }
   1574 
   1575       // Okay, we decided not to do this, remove any inserted MBB's and clear
   1576       // SwitchCases.
   1577       for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
   1578         FuncInfo.MF->erase(SwitchCases[i].ThisBB);
   1579 
   1580       SwitchCases.clear();
   1581     }
   1582   }
   1583 
   1584   // Create a CaseBlock record representing this branch.
   1585   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
   1586                NULL, Succ0MBB, Succ1MBB, BrMBB);
   1587 
   1588   // Use visitSwitchCase to actually insert the fast branch sequence for this
   1589   // cond branch.
   1590   visitSwitchCase(CB, BrMBB);
   1591 }
   1592 
   1593 /// visitSwitchCase - Emits the necessary code to represent a single node in
   1594 /// the binary search tree resulting from lowering a switch instruction.
   1595 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
   1596                                           MachineBasicBlock *SwitchBB) {
   1597   SDValue Cond;
   1598   SDValue CondLHS = getValue(CB.CmpLHS);
   1599   DebugLoc dl = getCurDebugLoc();
   1600 
   1601   // Build the setcc now.
   1602   if (CB.CmpMHS == NULL) {
   1603     // Fold "(X == true)" to X and "(X == false)" to !X to
   1604     // handle common cases produced by branch lowering.
   1605     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
   1606         CB.CC == ISD::SETEQ)
   1607       Cond = CondLHS;
   1608     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
   1609              CB.CC == ISD::SETEQ) {
   1610       SDValue True = DAG.getConstant(1, CondLHS.getValueType());
   1611       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
   1612     } else
   1613       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
   1614   } else {
   1615     assert(CB.CC == ISD::SETCC_INVALID &&
   1616            "Condition is undefined for to-the-range belonging check.");
   1617 
   1618     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
   1619     const APInt& High  = cast<ConstantInt>(CB.CmpRHS)->getValue();
   1620 
   1621     SDValue CmpOp = getValue(CB.CmpMHS);
   1622     EVT VT = CmpOp.getValueType();
   1623 
   1624     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(false)) {
   1625       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
   1626                           ISD::SETULE);
   1627     } else {
   1628       SDValue SUB = DAG.getNode(ISD::SUB, dl,
   1629                                 VT, CmpOp, DAG.getConstant(Low, VT));
   1630       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
   1631                           DAG.getConstant(High-Low, VT), ISD::SETULE);
   1632     }
   1633   }
   1634 
   1635   // Update successor info
   1636   addSuccessorWithWeight(SwitchBB, CB.TrueBB, CB.TrueWeight);
   1637   // TrueBB and FalseBB are always different unless the incoming IR is
   1638   // degenerate. This only happens when running llc on weird IR.
   1639   if (CB.TrueBB != CB.FalseBB)
   1640     addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight);
   1641 
   1642   // Set NextBlock to be the MBB immediately after the current one, if any.
   1643   // This is used to avoid emitting unnecessary branches to the next block.
   1644   MachineBasicBlock *NextBlock = 0;
   1645   MachineFunction::iterator BBI = SwitchBB;
   1646   if (++BBI != FuncInfo.MF->end())
   1647     NextBlock = BBI;
   1648 
   1649   // If the lhs block is the next block, invert the condition so that we can
   1650   // fall through to the lhs instead of the rhs block.
   1651   if (CB.TrueBB == NextBlock) {
   1652     std::swap(CB.TrueBB, CB.FalseBB);
   1653     SDValue True = DAG.getConstant(1, Cond.getValueType());
   1654     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
   1655   }
   1656 
   1657   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
   1658                                MVT::Other, getControlRoot(), Cond,
   1659                                DAG.getBasicBlock(CB.TrueBB));
   1660 
   1661   // Insert the false branch. Do this even if it's a fall through branch,
   1662   // this makes it easier to do DAG optimizations which require inverting
   1663   // the branch condition.
   1664   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
   1665                        DAG.getBasicBlock(CB.FalseBB));
   1666 
   1667   DAG.setRoot(BrCond);
   1668 }
   1669 
   1670 /// visitJumpTable - Emit JumpTable node in the current MBB
   1671 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
   1672   // Emit the code for the jump table
   1673   assert(JT.Reg != -1U && "Should lower JT Header first!");
   1674   EVT PTy = TLI.getPointerTy();
   1675   SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
   1676                                      JT.Reg, PTy);
   1677   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
   1678   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
   1679                                     MVT::Other, Index.getValue(1),
   1680                                     Table, Index);
   1681   DAG.setRoot(BrJumpTable);
   1682 }
   1683 
   1684 /// visitJumpTableHeader - This function emits necessary code to produce index
   1685 /// in the JumpTable from switch case.
   1686 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
   1687                                                JumpTableHeader &JTH,
   1688                                                MachineBasicBlock *SwitchBB) {
   1689   // Subtract the lowest switch case value from the value being switched on and
   1690   // conditional branch to default mbb if the result is greater than the
   1691   // difference between smallest and largest cases.
   1692   SDValue SwitchOp = getValue(JTH.SValue);
   1693   EVT VT = SwitchOp.getValueType();
   1694   SDValue Sub = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
   1695                             DAG.getConstant(JTH.First, VT));
   1696 
   1697   // The SDNode we just created, which holds the value being switched on minus
   1698   // the smallest case value, needs to be copied to a virtual register so it
   1699   // can be used as an index into the jump table in a subsequent basic block.
   1700   // This value may be smaller or larger than the target's pointer type, and
   1701   // therefore require extension or truncating.
   1702   SwitchOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), TLI.getPointerTy());
   1703 
   1704   unsigned JumpTableReg = FuncInfo.CreateReg(TLI.getPointerTy());
   1705   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
   1706                                     JumpTableReg, SwitchOp);
   1707   JT.Reg = JumpTableReg;
   1708 
   1709   // Emit the range check for the jump table, and branch to the default block
   1710   // for the switch statement if the value being switched on exceeds the largest
   1711   // case in the switch.
   1712   SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
   1713                              TLI.getSetCCResultType(Sub.getValueType()), Sub,
   1714                              DAG.getConstant(JTH.Last-JTH.First,VT),
   1715                              ISD::SETUGT);
   1716 
   1717   // Set NextBlock to be the MBB immediately after the current one, if any.
   1718   // This is used to avoid emitting unnecessary branches to the next block.
   1719   MachineBasicBlock *NextBlock = 0;
   1720   MachineFunction::iterator BBI = SwitchBB;
   1721 
   1722   if (++BBI != FuncInfo.MF->end())
   1723     NextBlock = BBI;
   1724 
   1725   SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
   1726                                MVT::Other, CopyTo, CMP,
   1727                                DAG.getBasicBlock(JT.Default));
   1728 
   1729   if (JT.MBB != NextBlock)
   1730     BrCond = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
   1731                          DAG.getBasicBlock(JT.MBB));
   1732 
   1733   DAG.setRoot(BrCond);
   1734 }
   1735 
   1736 /// visitBitTestHeader - This function emits necessary code to produce value
   1737 /// suitable for "bit tests"
   1738 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
   1739                                              MachineBasicBlock *SwitchBB) {
   1740   // Subtract the minimum value
   1741   SDValue SwitchOp = getValue(B.SValue);
   1742   EVT VT = SwitchOp.getValueType();
   1743   SDValue Sub = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
   1744                             DAG.getConstant(B.First, VT));
   1745 
   1746   // Check range
   1747   SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
   1748                                   TLI.getSetCCResultType(Sub.getValueType()),
   1749                                   Sub, DAG.getConstant(B.Range, VT),
   1750                                   ISD::SETUGT);
   1751 
   1752   // Determine the type of the test operands.
   1753   bool UsePtrType = false;
   1754   if (!TLI.isTypeLegal(VT))
   1755     UsePtrType = true;
   1756   else {
   1757     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
   1758       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
   1759         // Switch table case range are encoded into series of masks.
   1760         // Just use pointer type, it's guaranteed to fit.
   1761         UsePtrType = true;
   1762         break;
   1763       }
   1764   }
   1765   if (UsePtrType) {
   1766     VT = TLI.getPointerTy();
   1767     Sub = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), VT);
   1768   }
   1769 
   1770   B.RegVT = VT.getSimpleVT();
   1771   B.Reg = FuncInfo.CreateReg(B.RegVT);
   1772   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
   1773                                     B.Reg, Sub);
   1774 
   1775   // Set NextBlock to be the MBB immediately after the current one, if any.
   1776   // This is used to avoid emitting unnecessary branches to the next block.
   1777   MachineBasicBlock *NextBlock = 0;
   1778   MachineFunction::iterator BBI = SwitchBB;
   1779   if (++BBI != FuncInfo.MF->end())
   1780     NextBlock = BBI;
   1781 
   1782   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
   1783 
   1784   addSuccessorWithWeight(SwitchBB, B.Default);
   1785   addSuccessorWithWeight(SwitchBB, MBB);
   1786 
   1787   SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
   1788                                 MVT::Other, CopyTo, RangeCmp,
   1789                                 DAG.getBasicBlock(B.Default));
   1790 
   1791   if (MBB != NextBlock)
   1792     BrRange = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo,
   1793                           DAG.getBasicBlock(MBB));
   1794 
   1795   DAG.setRoot(BrRange);
   1796 }
   1797 
   1798 /// visitBitTestCase - this function produces one "bit test"
   1799 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
   1800                                            MachineBasicBlock* NextMBB,
   1801                                            uint32_t BranchWeightToNext,
   1802                                            unsigned Reg,
   1803                                            BitTestCase &B,
   1804                                            MachineBasicBlock *SwitchBB) {
   1805   MVT VT = BB.RegVT;
   1806   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
   1807                                        Reg, VT);
   1808   SDValue Cmp;
   1809   unsigned PopCount = CountPopulation_64(B.Mask);
   1810   if (PopCount == 1) {
   1811     // Testing for a single bit; just compare the shift count with what it
   1812     // would need to be to shift a 1 bit in that position.
   1813     Cmp = DAG.getSetCC(getCurDebugLoc(),
   1814                        TLI.getSetCCResultType(VT),
   1815                        ShiftOp,
   1816                        DAG.getConstant(CountTrailingZeros_64(B.Mask), VT),
   1817                        ISD::SETEQ);
   1818   } else if (PopCount == BB.Range) {
   1819     // There is only one zero bit in the range, test for it directly.
   1820     Cmp = DAG.getSetCC(getCurDebugLoc(),
   1821                        TLI.getSetCCResultType(VT),
   1822                        ShiftOp,
   1823                        DAG.getConstant(CountTrailingOnes_64(B.Mask), VT),
   1824                        ISD::SETNE);
   1825   } else {
   1826     // Make desired shift
   1827     SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(), VT,
   1828                                     DAG.getConstant(1, VT), ShiftOp);
   1829 
   1830     // Emit bit tests and jumps
   1831     SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
   1832                                 VT, SwitchVal, DAG.getConstant(B.Mask, VT));
   1833     Cmp = DAG.getSetCC(getCurDebugLoc(),
   1834                        TLI.getSetCCResultType(VT),
   1835                        AndOp, DAG.getConstant(0, VT),
   1836                        ISD::SETNE);
   1837   }
   1838 
   1839   // The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight.
   1840   addSuccessorWithWeight(SwitchBB, B.TargetBB, B.ExtraWeight);
   1841   // The branch weight from SwitchBB to NextMBB is BranchWeightToNext.
   1842   addSuccessorWithWeight(SwitchBB, NextMBB, BranchWeightToNext);
   1843 
   1844   SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
   1845                               MVT::Other, getControlRoot(),
   1846                               Cmp, DAG.getBasicBlock(B.TargetBB));
   1847 
   1848   // Set NextBlock to be the MBB immediately after the current one, if any.
   1849   // This is used to avoid emitting unnecessary branches to the next block.
   1850   MachineBasicBlock *NextBlock = 0;
   1851   MachineFunction::iterator BBI = SwitchBB;
   1852   if (++BBI != FuncInfo.MF->end())
   1853     NextBlock = BBI;
   1854 
   1855   if (NextMBB != NextBlock)
   1856     BrAnd = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
   1857                         DAG.getBasicBlock(NextMBB));
   1858 
   1859   DAG.setRoot(BrAnd);
   1860 }
   1861 
   1862 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
   1863   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
   1864 
   1865   // Retrieve successors.
   1866   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
   1867   MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
   1868 
   1869   const Value *Callee(I.getCalledValue());
   1870   const Function *Fn = dyn_cast<Function>(Callee);
   1871   if (isa<InlineAsm>(Callee))
   1872     visitInlineAsm(&I);
   1873   else if (Fn && Fn->isIntrinsic()) {
   1874     assert(Fn->getIntrinsicID() == Intrinsic::donothing);
   1875     // Ignore invokes to @llvm.donothing: jump directly to the next BB.
   1876   } else
   1877     LowerCallTo(&I, getValue(Callee), false, LandingPad);
   1878 
   1879   // If the value of the invoke is used outside of its defining block, make it
   1880   // available as a virtual register.
   1881   CopyToExportRegsIfNeeded(&I);
   1882 
   1883   // Update successor info
   1884   addSuccessorWithWeight(InvokeMBB, Return);
   1885   addSuccessorWithWeight(InvokeMBB, LandingPad);
   1886 
   1887   // Drop into normal successor.
   1888   DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
   1889                           MVT::Other, getControlRoot(),
   1890                           DAG.getBasicBlock(Return)));
   1891 }
   1892 
   1893 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
   1894   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
   1895 }
   1896 
   1897 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
   1898   assert(FuncInfo.MBB->isLandingPad() &&
   1899          "Call to landingpad not in landing pad!");
   1900 
   1901   MachineBasicBlock *MBB = FuncInfo.MBB;
   1902   MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
   1903   AddLandingPadInfo(LP, MMI, MBB);
   1904 
   1905   // If there aren't registers to copy the values into (e.g., during SjLj
   1906   // exceptions), then don't bother to create these DAG nodes.
   1907   if (TLI.getExceptionPointerRegister() == 0 &&
   1908       TLI.getExceptionSelectorRegister() == 0)
   1909     return;
   1910 
   1911   SmallVector<EVT, 2> ValueVTs;
   1912   ComputeValueVTs(TLI, LP.getType(), ValueVTs);
   1913 
   1914   // Insert the EXCEPTIONADDR instruction.
   1915   assert(FuncInfo.MBB->isLandingPad() &&
   1916          "Call to eh.exception not in landing pad!");
   1917   SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
   1918   SDValue Ops[2];
   1919   Ops[0] = DAG.getRoot();
   1920   SDValue Op1 = DAG.getNode(ISD::EXCEPTIONADDR, getCurDebugLoc(), VTs, Ops, 1);
   1921   SDValue Chain = Op1.getValue(1);
   1922 
   1923   // Insert the EHSELECTION instruction.
   1924   VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
   1925   Ops[0] = Op1;
   1926   Ops[1] = Chain;
   1927   SDValue Op2 = DAG.getNode(ISD::EHSELECTION, getCurDebugLoc(), VTs, Ops, 2);
   1928   Chain = Op2.getValue(1);
   1929   Op2 = DAG.getSExtOrTrunc(Op2, getCurDebugLoc(), MVT::i32);
   1930 
   1931   Ops[0] = Op1;
   1932   Ops[1] = Op2;
   1933   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
   1934                             DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
   1935                             &Ops[0], 2);
   1936 
   1937   std::pair<SDValue, SDValue> RetPair = std::make_pair(Res, Chain);
   1938   setValue(&LP, RetPair.first);
   1939   DAG.setRoot(RetPair.second);
   1940 }
   1941 
   1942 /// handleSmallSwitchCaseRange - Emit a series of specific tests (suitable for
   1943 /// small case ranges).
   1944 bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
   1945                                                  CaseRecVector& WorkList,
   1946                                                  const Value* SV,
   1947                                                  MachineBasicBlock *Default,
   1948                                                  MachineBasicBlock *SwitchBB) {
   1949   // Size is the number of Cases represented by this range.
   1950   size_t Size = CR.Range.second - CR.Range.first;
   1951   if (Size > 3)
   1952     return false;
   1953 
   1954   // Get the MachineFunction which holds the current MBB.  This is used when
   1955   // inserting any additional MBBs necessary to represent the switch.
   1956   MachineFunction *CurMF = FuncInfo.MF;
   1957 
   1958   // Figure out which block is immediately after the current one.
   1959   MachineBasicBlock *NextBlock = 0;
   1960   MachineFunction::iterator BBI = CR.CaseBB;
   1961 
   1962   if (++BBI != FuncInfo.MF->end())
   1963     NextBlock = BBI;
   1964 
   1965   BranchProbabilityInfo *BPI = FuncInfo.BPI;
   1966   // If any two of the cases has the same destination, and if one value
   1967   // is the same as the other, but has one bit unset that the other has set,
   1968   // use bit manipulation to do two compares at once.  For example:
   1969   // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
   1970   // TODO: This could be extended to merge any 2 cases in switches with 3 cases.
   1971   // TODO: Handle cases where CR.CaseBB != SwitchBB.
   1972   if (Size == 2 && CR.CaseBB == SwitchBB) {
   1973     Case &Small = *CR.Range.first;
   1974     Case &Big = *(CR.Range.second-1);
   1975 
   1976     if (Small.Low == Small.High && Big.Low == Big.High && Small.BB == Big.BB) {
   1977       const APInt& SmallValue = cast<ConstantInt>(Small.Low)->getValue();
   1978       const APInt& BigValue = cast<ConstantInt>(Big.Low)->getValue();
   1979 
   1980       // Check that there is only one bit different.
   1981       if (BigValue.countPopulation() == SmallValue.countPopulation() + 1 &&
   1982           (SmallValue | BigValue) == BigValue) {
   1983         // Isolate the common bit.
   1984         APInt CommonBit = BigValue & ~SmallValue;
   1985         assert((SmallValue | CommonBit) == BigValue &&
   1986                CommonBit.countPopulation() == 1 && "Not a common bit?");
   1987 
   1988         SDValue CondLHS = getValue(SV);
   1989         EVT VT = CondLHS.getValueType();
   1990         DebugLoc DL = getCurDebugLoc();
   1991 
   1992         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
   1993                                  DAG.getConstant(CommonBit, VT));
   1994         SDValue Cond = DAG.getSetCC(DL, MVT::i1,
   1995                                     Or, DAG.getConstant(BigValue, VT),
   1996                                     ISD::SETEQ);
   1997 
   1998         // Update successor info.
   1999         // Both Small and Big will jump to Small.BB, so we sum up the weights.
   2000         addSuccessorWithWeight(SwitchBB, Small.BB,
   2001                                Small.ExtraWeight + Big.ExtraWeight);
   2002         addSuccessorWithWeight(SwitchBB, Default,
   2003           // The default destination is the first successor in IR.
   2004           BPI ? BPI->getEdgeWeight(SwitchBB->getBasicBlock(), (unsigned)0) : 0);
   2005 
   2006         // Insert the true branch.
   2007         SDValue BrCond = DAG.getNode(ISD::BRCOND, DL, MVT::Other,
   2008                                      getControlRoot(), Cond,
   2009                                      DAG.getBasicBlock(Small.BB));
   2010 
   2011         // Insert the false branch.
   2012         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
   2013                              DAG.getBasicBlock(Default));
   2014 
   2015         DAG.setRoot(BrCond);
   2016         return true;
   2017       }
   2018     }
   2019   }
   2020 
   2021   // Order cases by weight so the most likely case will be checked first.
   2022   uint32_t UnhandledWeights = 0;
   2023   if (BPI) {
   2024     for (CaseItr I = CR.Range.first, IE = CR.Range.second; I != IE; ++I) {
   2025       uint32_t IWeight = I->ExtraWeight;
   2026       UnhandledWeights += IWeight;
   2027       for (CaseItr J = CR.Range.first; J < I; ++J) {
   2028         uint32_t JWeight = J->ExtraWeight;
   2029         if (IWeight > JWeight)
   2030           std::swap(*I, *J);
   2031       }
   2032     }
   2033   }
   2034   // Rearrange the case blocks so that the last one falls through if possible.
   2035   Case &BackCase = *(CR.Range.second-1);
   2036   if (Size > 1 &&
   2037       NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
   2038     // The last case block won't fall through into 'NextBlock' if we emit the
   2039     // branches in this order.  See if rearranging a case value would help.
   2040     // We start at the bottom as it's the case with the least weight.
   2041     for (Case *I = &*(CR.Range.second-2), *E = &*CR.Range.first-1; I != E; --I){
   2042       if (I->BB == NextBlock) {
   2043         std::swap(*I, BackCase);
   2044         break;
   2045       }
   2046     }
   2047   }
   2048 
   2049   // Create a CaseBlock record representing a conditional branch to
   2050   // the Case's target mbb if the value being switched on SV is equal
   2051   // to C.
   2052   MachineBasicBlock *CurBlock = CR.CaseBB;
   2053   for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
   2054     MachineBasicBlock *FallThrough;
   2055     if (I != E-1) {
   2056       FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
   2057       CurMF->insert(BBI, FallThrough);
   2058 
   2059       // Put SV in a virtual register to make it available from the new blocks.
   2060       ExportFromCurrentBlock(SV);
   2061     } else {
   2062       // If the last case doesn't match, go to the default block.
   2063       FallThrough = Default;
   2064     }
   2065 
   2066     const Value *RHS, *LHS, *MHS;
   2067     ISD::CondCode CC;
   2068     if (I->High == I->Low) {
   2069       // This is just small small case range :) containing exactly 1 case
   2070       CC = ISD::SETEQ;
   2071       LHS = SV; RHS = I->High; MHS = NULL;
   2072     } else {
   2073       CC = ISD::SETCC_INVALID;
   2074       LHS = I->Low; MHS = SV; RHS = I->High;
   2075     }
   2076 
   2077     // The false weight should be sum of all un-handled cases.
   2078     UnhandledWeights -= I->ExtraWeight;
   2079     CaseBlock CB(CC, LHS, RHS, MHS, /* truebb */ I->BB, /* falsebb */ FallThrough,
   2080                  /* me */ CurBlock,
   2081                  /* trueweight */ I->ExtraWeight,
   2082                  /* falseweight */ UnhandledWeights);
   2083 
   2084     // If emitting the first comparison, just call visitSwitchCase to emit the
   2085     // code into the current block.  Otherwise, push the CaseBlock onto the
   2086     // vector to be later processed by SDISel, and insert the node's MBB
   2087     // before the next MBB.
   2088     if (CurBlock == SwitchBB)
   2089       visitSwitchCase(CB, SwitchBB);
   2090     else
   2091       SwitchCases.push_back(CB);
   2092 
   2093     CurBlock = FallThrough;
   2094   }
   2095 
   2096   return true;
   2097 }
   2098 
   2099 static inline bool areJTsAllowed(const TargetLowering &TLI) {
   2100   return TLI.supportJumpTables() &&
   2101           (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
   2102            TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
   2103 }
   2104 
   2105 static APInt ComputeRange(const APInt &First, const APInt &Last) {
   2106   uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
   2107   APInt LastExt = Last.zext(BitWidth), FirstExt = First.zext(BitWidth);
   2108   return (LastExt - FirstExt + 1ULL);
   2109 }
   2110 
   2111 /// handleJTSwitchCase - Emit jumptable for current switch case range
   2112 bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
   2113                                              CaseRecVector &WorkList,
   2114                                              const Value *SV,
   2115                                              MachineBasicBlock *Default,
   2116                                              MachineBasicBlock *SwitchBB) {
   2117   Case& FrontCase = *CR.Range.first;
   2118   Case& BackCase  = *(CR.Range.second-1);
   2119 
   2120   const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
   2121   const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
   2122 
   2123   APInt TSize(First.getBitWidth(), 0);
   2124   for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I)
   2125     TSize += I->size();
   2126 
   2127   if (!areJTsAllowed(TLI) || TSize.ult(TLI.getMinimumJumpTableEntries()))
   2128     return false;
   2129 
   2130   APInt Range = ComputeRange(First, Last);
   2131   // The density is TSize / Range. Require at least 40%.
   2132   // It should not be possible for IntTSize to saturate for sane code, but make
   2133   // sure we handle Range saturation correctly.
   2134   uint64_t IntRange = Range.getLimitedValue(UINT64_MAX/10);
   2135   uint64_t IntTSize = TSize.getLimitedValue(UINT64_MAX/10);
   2136   if (IntTSize * 10 < IntRange * 4)
   2137     return false;
   2138 
   2139   DEBUG(dbgs() << "Lowering jump table\n"
   2140                << "First entry: " << First << ". Last entry: " << Last << '\n'
   2141                << "Range: " << Range << ". Size: " << TSize << ".\n\n");
   2142 
   2143   // Get the MachineFunction which holds the current MBB.  This is used when
   2144   // inserting any additional MBBs necessary to represent the switch.
   2145   MachineFunction *CurMF = FuncInfo.MF;
   2146 
   2147   // Figure out which block is immediately after the current one.
   2148   MachineFunction::iterator BBI = CR.CaseBB;
   2149   ++BBI;
   2150 
   2151   const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
   2152 
   2153   // Create a new basic block to hold the code for loading the address
   2154   // of the jump table, and jumping to it.  Update successor information;
   2155   // we will either branch to the default case for the switch, or the jump
   2156   // table.
   2157   MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
   2158   CurMF->insert(BBI, JumpTableBB);
   2159 
   2160   addSuccessorWithWeight(CR.CaseBB, Default);
   2161   addSuccessorWithWeight(CR.CaseBB, JumpTableBB);
   2162 
   2163   // Build a vector of destination BBs, corresponding to each target
   2164   // of the jump table. If the value of the jump table slot corresponds to
   2165   // a case statement, push the case's BB onto the vector, otherwise, push
   2166   // the default BB.
   2167   std::vector<MachineBasicBlock*> DestBBs;
   2168   APInt TEI = First;
   2169   for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
   2170     const APInt &Low = cast<ConstantInt>(I->Low)->getValue();
   2171     const APInt &High = cast<ConstantInt>(I->High)->getValue();
   2172 
   2173     if (Low.ule(TEI) && TEI.ule(High)) {
   2174       DestBBs.push_back(I->BB);
   2175       if (TEI==High)
   2176         ++I;
   2177     } else {
   2178       DestBBs.push_back(Default);
   2179     }
   2180   }
   2181 
   2182   // Calculate weight for each unique destination in CR.
   2183   DenseMap<MachineBasicBlock*, uint32_t> DestWeights;
   2184   if (FuncInfo.BPI)
   2185     for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
   2186       DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
   2187           DestWeights.find(I->BB);
   2188       if (Itr != DestWeights.end())
   2189         Itr->second += I->ExtraWeight;
   2190       else
   2191         DestWeights[I->BB] = I->ExtraWeight;
   2192     }
   2193 
   2194   // Update successor info. Add one edge to each unique successor.
   2195   BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
   2196   for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
   2197          E = DestBBs.end(); I != E; ++I) {
   2198     if (!SuccsHandled[(*I)->getNumber()]) {
   2199       SuccsHandled[(*I)->getNumber()] = true;
   2200       DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
   2201           DestWeights.find(*I);
   2202       addSuccessorWithWeight(JumpTableBB, *I,
   2203                              Itr != DestWeights.end() ? Itr->second : 0);
   2204     }
   2205   }
   2206 
   2207   // Create a jump table index for this jump table.
   2208   unsigned JTEncoding = TLI.getJumpTableEncoding();
   2209   unsigned JTI = CurMF->getOrCreateJumpTableInfo(JTEncoding)
   2210                        ->createJumpTableIndex(DestBBs);
   2211 
   2212   // Set the jump table information so that we can codegen it as a second
   2213   // MachineBasicBlock
   2214   JumpTable JT(-1U, JTI, JumpTableBB, Default);
   2215   JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == SwitchBB));
   2216   if (CR.CaseBB == SwitchBB)
   2217     visitJumpTableHeader(JT, JTH, SwitchBB);
   2218 
   2219   JTCases.push_back(JumpTableBlock(JTH, JT));
   2220   return true;
   2221 }
   2222 
   2223 /// handleBTSplitSwitchCase - emit comparison and split binary search tree into
   2224 /// 2 subtrees.
   2225 bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
   2226                                                   CaseRecVector& WorkList,
   2227                                                   const Value* SV,
   2228                                                   MachineBasicBlock *Default,
   2229                                                   MachineBasicBlock *SwitchBB) {
   2230   // Get the MachineFunction which holds the current MBB.  This is used when
   2231   // inserting any additional MBBs necessary to represent the switch.
   2232   MachineFunction *CurMF = FuncInfo.MF;
   2233 
   2234   // Figure out which block is immediately after the current one.
   2235   MachineFunction::iterator BBI = CR.CaseBB;
   2236   ++BBI;
   2237 
   2238   Case& FrontCase = *CR.Range.first;
   2239   Case& BackCase  = *(CR.Range.second-1);
   2240   const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
   2241 
   2242   // Size is the number of Cases represented by this range.
   2243   unsigned Size = CR.Range.second - CR.Range.first;
   2244 
   2245   const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
   2246   const APInt &Last  = cast<ConstantInt>(BackCase.High)->getValue();
   2247   double FMetric = 0;
   2248   CaseItr Pivot = CR.Range.first + Size/2;
   2249 
   2250   // Select optimal pivot, maximizing sum density of LHS and RHS. This will
   2251   // (heuristically) allow us to emit JumpTable's later.
   2252   APInt TSize(First.getBitWidth(), 0);
   2253   for (CaseItr I = CR.Range.first, E = CR.Range.second;
   2254        I!=E; ++I)
   2255     TSize += I->size();
   2256 
   2257   APInt LSize = FrontCase.size();
   2258   APInt RSize = TSize-LSize;
   2259   DEBUG(dbgs() << "Selecting best pivot: \n"
   2260                << "First: " << First << ", Last: " << Last <<'\n'
   2261                << "LSize: " << LSize << ", RSize: " << RSize << '\n');
   2262   for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
   2263        J!=E; ++I, ++J) {
   2264     const APInt &LEnd = cast<ConstantInt>(I->High)->getValue();
   2265     const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue();
   2266     APInt Range = ComputeRange(LEnd, RBegin);
   2267     assert((Range - 2ULL).isNonNegative() &&
   2268            "Invalid case distance");
   2269     // Use volatile double here to avoid excess precision issues on some hosts,
   2270     // e.g. that use 80-bit X87 registers.
   2271     volatile double LDensity =
   2272        (double)LSize.roundToDouble() /
   2273                            (LEnd - First + 1ULL).roundToDouble();
   2274     volatile double RDensity =
   2275       (double)RSize.roundToDouble() /
   2276                            (Last - RBegin + 1ULL).roundToDouble();
   2277     double Metric = Range.logBase2()*(LDensity+RDensity);
   2278     // Should always split in some non-trivial place
   2279     DEBUG(dbgs() <<"=>Step\n"
   2280                  << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
   2281                  << "LDensity: " << LDensity
   2282                  << ", RDensity: " << RDensity << '\n'
   2283                  << "Metric: " << Metric << '\n');
   2284     if (FMetric < Metric) {
   2285       Pivot = J;
   2286       FMetric = Metric;
   2287       DEBUG(dbgs() << "Current metric set to: " << FMetric << '\n');
   2288     }
   2289 
   2290     LSize += J->size();
   2291     RSize -= J->size();
   2292   }
   2293   if (areJTsAllowed(TLI)) {
   2294     // If our case is dense we *really* should handle it earlier!
   2295     assert((FMetric > 0) && "Should handle dense range earlier!");
   2296   } else {
   2297     Pivot = CR.Range.first + Size/2;
   2298   }
   2299 
   2300   CaseRange LHSR(CR.Range.first, Pivot);
   2301   CaseRange RHSR(Pivot, CR.Range.second);
   2302   const Constant *C = Pivot->Low;
   2303   MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
   2304 
   2305   // We know that we branch to the LHS if the Value being switched on is
   2306   // less than the Pivot value, C.  We use this to optimize our binary
   2307   // tree a bit, by recognizing that if SV is greater than or equal to the
   2308   // LHS's Case Value, and that Case Value is exactly one less than the
   2309   // Pivot's Value, then we can branch directly to the LHS's Target,
   2310   // rather than creating a leaf node for it.
   2311   if ((LHSR.second - LHSR.first) == 1 &&
   2312       LHSR.first->High == CR.GE &&
   2313       cast<ConstantInt>(C)->getValue() ==
   2314       (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
   2315     TrueBB = LHSR.first->BB;
   2316   } else {
   2317     TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
   2318     CurMF->insert(BBI, TrueBB);
   2319     WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
   2320 
   2321     // Put SV in a virtual register to make it available from the new blocks.
   2322     ExportFromCurrentBlock(SV);
   2323   }
   2324 
   2325   // Similar to the optimization above, if the Value being switched on is
   2326   // known to be less than the Constant CR.LT, and the current Case Value
   2327   // is CR.LT - 1, then we can branch directly to the target block for
   2328   // the current Case Value, rather than emitting a RHS leaf node for it.
   2329   if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
   2330       cast<ConstantInt>(RHSR.first->Low)->getValue() ==
   2331       (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
   2332     FalseBB = RHSR.first->BB;
   2333   } else {
   2334     FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
   2335     CurMF->insert(BBI, FalseBB);
   2336     WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
   2337 
   2338     // Put SV in a virtual register to make it available from the new blocks.
   2339     ExportFromCurrentBlock(SV);
   2340   }
   2341 
   2342   // Create a CaseBlock record representing a conditional branch to
   2343   // the LHS node if the value being switched on SV is less than C.
   2344   // Otherwise, branch to LHS.
   2345   CaseBlock CB(ISD::SETULT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
   2346 
   2347   if (CR.CaseBB == SwitchBB)
   2348     visitSwitchCase(CB, SwitchBB);
   2349   else
   2350     SwitchCases.push_back(CB);
   2351 
   2352   return true;
   2353 }
   2354 
   2355 /// handleBitTestsSwitchCase - if current case range has few destination and
   2356 /// range span less, than machine word bitwidth, encode case range into series
   2357 /// of masks and emit bit tests with these masks.
   2358 bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
   2359                                                    CaseRecVector& WorkList,
   2360                                                    const Value* SV,
   2361                                                    MachineBasicBlock* Default,
   2362                                                    MachineBasicBlock *SwitchBB){
   2363   EVT PTy = TLI.getPointerTy();
   2364   unsigned IntPtrBits = PTy.getSizeInBits();
   2365 
   2366   Case& FrontCase = *CR.Range.first;
   2367   Case& BackCase  = *(CR.Range.second-1);
   2368 
   2369   // Get the MachineFunction which holds the current MBB.  This is used when
   2370   // inserting any additional MBBs necessary to represent the switch.
   2371   MachineFunction *CurMF = FuncInfo.MF;
   2372 
   2373   // If target does not have legal shift left, do not emit bit tests at all.
   2374   if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
   2375     return false;
   2376 
   2377   size_t numCmps = 0;
   2378   for (CaseItr I = CR.Range.first, E = CR.Range.second;
   2379        I!=E; ++I) {
   2380     // Single case counts one, case range - two.
   2381     numCmps += (I->Low == I->High ? 1 : 2);
   2382   }
   2383 
   2384   // Count unique destinations
   2385   SmallSet<MachineBasicBlock*, 4> Dests;
   2386   for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
   2387     Dests.insert(I->BB);
   2388     if (Dests.size() > 3)
   2389       // Don't bother the code below, if there are too much unique destinations
   2390       return false;
   2391   }
   2392   DEBUG(dbgs() << "Total number of unique destinations: "
   2393         << Dests.size() << '\n'
   2394         << "Total number of comparisons: " << numCmps << '\n');
   2395 
   2396   // Compute span of values.
   2397   const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
   2398   const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
   2399   APInt cmpRange = maxValue - minValue;
   2400 
   2401   DEBUG(dbgs() << "Compare range: " << cmpRange << '\n'
   2402                << "Low bound: " << minValue << '\n'
   2403                << "High bound: " << maxValue << '\n');
   2404 
   2405   if (cmpRange.uge(IntPtrBits) ||
   2406       (!(Dests.size() == 1 && numCmps >= 3) &&
   2407        !(Dests.size() == 2 && numCmps >= 5) &&
   2408        !(Dests.size() >= 3 && numCmps >= 6)))
   2409     return false;
   2410 
   2411   DEBUG(dbgs() << "Emitting bit tests\n");
   2412   APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
   2413 
   2414   // Optimize the case where all the case values fit in a
   2415   // word without having to subtract minValue. In this case,
   2416   // we can optimize away the subtraction.
   2417   if (maxValue.ult(IntPtrBits)) {
   2418     cmpRange = maxValue;
   2419   } else {
   2420     lowBound = minValue;
   2421   }
   2422 
   2423   CaseBitsVector CasesBits;
   2424   unsigned i, count = 0;
   2425 
   2426   for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
   2427     MachineBasicBlock* Dest = I->BB;
   2428     for (i = 0; i < count; ++i)
   2429       if (Dest == CasesBits[i].BB)
   2430         break;
   2431 
   2432     if (i == count) {
   2433       assert((count < 3) && "Too much destinations to test!");
   2434       CasesBits.push_back(CaseBits(0, Dest, 0, 0/*Weight*/));
   2435       count++;
   2436     }
   2437 
   2438     const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
   2439     const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
   2440 
   2441     uint64_t lo = (lowValue - lowBound).getZExtValue();
   2442     uint64_t hi = (highValue - lowBound).getZExtValue();
   2443     CasesBits[i].ExtraWeight += I->ExtraWeight;
   2444 
   2445     for (uint64_t j = lo; j <= hi; j++) {
   2446       CasesBits[i].Mask |=  1ULL << j;
   2447       CasesBits[i].Bits++;
   2448     }
   2449 
   2450   }
   2451   std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
   2452 
   2453   BitTestInfo BTC;
   2454 
   2455   // Figure out which block is immediately after the current one.
   2456   MachineFunction::iterator BBI = CR.CaseBB;
   2457   ++BBI;
   2458 
   2459   const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
   2460 
   2461   DEBUG(dbgs() << "Cases:\n");
   2462   for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
   2463     DEBUG(dbgs() << "Mask: " << CasesBits[i].Mask
   2464                  << ", Bits: " << CasesBits[i].Bits
   2465                  << ", BB: " << CasesBits[i].BB << '\n');
   2466 
   2467     MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
   2468     CurMF->insert(BBI, CaseBB);
   2469     BTC.push_back(BitTestCase(CasesBits[i].Mask,
   2470                               CaseBB,
   2471                               CasesBits[i].BB, CasesBits[i].ExtraWeight));
   2472 
   2473     // Put SV in a virtual register to make it available from the new blocks.
   2474     ExportFromCurrentBlock(SV);
   2475   }
   2476 
   2477   BitTestBlock BTB(lowBound, cmpRange, SV,
   2478                    -1U, MVT::Other, (CR.CaseBB == SwitchBB),
   2479                    CR.CaseBB, Default, BTC);
   2480 
   2481   if (CR.CaseBB == SwitchBB)
   2482     visitBitTestHeader(BTB, SwitchBB);
   2483 
   2484   BitTestCases.push_back(BTB);
   2485 
   2486   return true;
   2487 }
   2488 
   2489 /// Clusterify - Transform simple list of Cases into list of CaseRange's
   2490 size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
   2491                                        const SwitchInst& SI) {
   2492 
   2493   /// Use a shorter form of declaration, and also
   2494   /// show the we want to use CRSBuilder as Clusterifier.
   2495   typedef IntegersSubsetMapping<MachineBasicBlock> Clusterifier;
   2496 
   2497   Clusterifier TheClusterifier;
   2498 
   2499   BranchProbabilityInfo *BPI = FuncInfo.BPI;
   2500   // Start with "simple" cases
   2501   for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end();
   2502        i != e; ++i) {
   2503     const BasicBlock *SuccBB = i.getCaseSuccessor();
   2504     MachineBasicBlock *SMBB = FuncInfo.MBBMap[SuccBB];
   2505 
   2506     TheClusterifier.add(i.getCaseValueEx(), SMBB,
   2507         BPI ? BPI->getEdgeWeight(SI.getParent(), i.getSuccessorIndex()) : 0);
   2508   }
   2509 
   2510   TheClusterifier.optimize();
   2511 
   2512   size_t numCmps = 0;
   2513   for (Clusterifier::RangeIterator i = TheClusterifier.begin(),
   2514        e = TheClusterifier.end(); i != e; ++i, ++numCmps) {
   2515     Clusterifier::Cluster &C = *i;
   2516     // Update edge weight for the cluster.
   2517     unsigned W = C.first.Weight;
   2518 
   2519     // FIXME: Currently work with ConstantInt based numbers.
   2520     // Changing it to APInt based is a pretty heavy for this commit.
   2521     Cases.push_back(Case(C.first.getLow().toConstantInt(),
   2522                          C.first.getHigh().toConstantInt(), C.second, W));
   2523 
   2524     if (C.first.getLow() != C.first.getHigh())
   2525     // A range counts double, since it requires two compares.
   2526     ++numCmps;
   2527   }
   2528 
   2529   return numCmps;
   2530 }
   2531 
   2532 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
   2533                                            MachineBasicBlock *Last) {
   2534   // Update JTCases.
   2535   for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
   2536     if (JTCases[i].first.HeaderBB == First)
   2537       JTCases[i].first.HeaderBB = Last;
   2538 
   2539   // Update BitTestCases.
   2540   for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
   2541     if (BitTestCases[i].Parent == First)
   2542       BitTestCases[i].Parent = Last;
   2543 }
   2544 
   2545 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
   2546   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
   2547 
   2548   // Figure out which block is immediately after the current one.
   2549   MachineBasicBlock *NextBlock = 0;
   2550   MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
   2551 
   2552   // If there is only the default destination, branch to it if it is not the
   2553   // next basic block.  Otherwise, just fall through.
   2554   if (!SI.getNumCases()) {
   2555     // Update machine-CFG edges.
   2556 
   2557     // If this is not a fall-through branch, emit the branch.
   2558     SwitchMBB->addSuccessor(Default);
   2559     if (Default != NextBlock)
   2560       DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
   2561                               MVT::Other, getControlRoot(),
   2562                               DAG.getBasicBlock(Default)));
   2563 
   2564     return;
   2565   }
   2566 
   2567   // If there are any non-default case statements, create a vector of Cases
   2568   // representing each one, and sort the vector so that we can efficiently
   2569   // create a binary search tree from them.
   2570   CaseVector Cases;
   2571   size_t numCmps = Clusterify(Cases, SI);
   2572   DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size()
   2573                << ". Total compares: " << numCmps << '\n');
   2574   (void)numCmps;
   2575 
   2576   // Get the Value to be switched on and default basic blocks, which will be
   2577   // inserted into CaseBlock records, representing basic blocks in the binary
   2578   // search tree.
   2579   const Value *SV = SI.getCondition();
   2580 
   2581   // Push the initial CaseRec onto the worklist
   2582   CaseRecVector WorkList;
   2583   WorkList.push_back(CaseRec(SwitchMBB,0,0,
   2584                              CaseRange(Cases.begin(),Cases.end())));
   2585 
   2586   while (!WorkList.empty()) {
   2587     // Grab a record representing a case range to process off the worklist
   2588     CaseRec CR = WorkList.back();
   2589     WorkList.pop_back();
   2590 
   2591     if (handleBitTestsSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
   2592       continue;
   2593 
   2594     // If the range has few cases (two or less) emit a series of specific
   2595     // tests.
   2596     if (handleSmallSwitchRange(CR, WorkList, SV, Default, SwitchMBB))
   2597       continue;
   2598 
   2599     // If the switch has more than N blocks, and is at least 40% dense, and the
   2600     // target supports indirect branches, then emit a jump table rather than
   2601     // lowering the switch to a binary tree of conditional branches.
   2602     // N defaults to 4 and is controlled via TLS.getMinimumJumpTableEntries().
   2603     if (handleJTSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
   2604       continue;
   2605 
   2606     // Emit binary tree. We need to pick a pivot, and push left and right ranges
   2607     // onto the worklist. Leafs are handled via handleSmallSwitchRange() call.
   2608     handleBTSplitSwitchCase(CR, WorkList, SV, Default, SwitchMBB);
   2609   }
   2610 }
   2611 
   2612 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
   2613   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
   2614 
   2615   // Update machine-CFG edges with unique successors.
   2616   SmallSet<BasicBlock*, 32> Done;
   2617   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
   2618     BasicBlock *BB = I.getSuccessor(i);
   2619     bool Inserted = Done.insert(BB);
   2620     if (!Inserted)
   2621         continue;
   2622 
   2623     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
   2624     addSuccessorWithWeight(IndirectBrMBB, Succ);
   2625   }
   2626 
   2627   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurDebugLoc(),
   2628                           MVT::Other, getControlRoot(),
   2629                           getValue(I.getAddress())));
   2630 }
   2631 
   2632 void SelectionDAGBuilder::visitFSub(const User &I) {
   2633   // -0.0 - X --> fneg
   2634   Type *Ty = I.getType();
   2635   if (isa<Constant>(I.getOperand(0)) &&
   2636       I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
   2637     SDValue Op2 = getValue(I.getOperand(1));
   2638     setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
   2639                              Op2.getValueType(), Op2));
   2640     return;
   2641   }
   2642 
   2643   visitBinary(I, ISD::FSUB);
   2644 }
   2645 
   2646 void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
   2647   SDValue Op1 = getValue(I.getOperand(0));
   2648   SDValue Op2 = getValue(I.getOperand(1));
   2649   setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
   2650                            Op1.getValueType(), Op1, Op2));
   2651 }
   2652 
   2653 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
   2654   SDValue Op1 = getValue(I.getOperand(0));
   2655   SDValue Op2 = getValue(I.getOperand(1));
   2656 
   2657   EVT ShiftTy = TLI.getShiftAmountTy(Op2.getValueType());
   2658 
   2659   // Coerce the shift amount to the right type if we can.
   2660   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
   2661     unsigned ShiftSize = ShiftTy.getSizeInBits();
   2662     unsigned Op2Size = Op2.getValueType().getSizeInBits();
   2663     DebugLoc DL = getCurDebugLoc();
   2664 
   2665     // If the operand is smaller than the shift count type, promote it.
   2666     if (ShiftSize > Op2Size)
   2667       Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
   2668 
   2669     // If the operand is larger than the shift count type but the shift
   2670     // count type has enough bits to represent any shift value, truncate
   2671     // it now. This is a common case and it exposes the truncate to
   2672     // optimization early.
   2673     else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
   2674       Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
   2675     // Otherwise we'll need to temporarily settle for some other convenient
   2676     // type.  Type legalization will make adjustments once the shiftee is split.
   2677     else
   2678       Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
   2679   }
   2680 
   2681   setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
   2682                            Op1.getValueType(), Op1, Op2));
   2683 }
   2684 
   2685 void SelectionDAGBuilder::visitSDiv(const User &I) {
   2686   SDValue Op1 = getValue(I.getOperand(0));
   2687   SDValue Op2 = getValue(I.getOperand(1));
   2688 
   2689   // Turn exact SDivs into multiplications.
   2690   // FIXME: This should be in DAGCombiner, but it doesn't have access to the
   2691   // exact bit.
   2692   if (isa<BinaryOperator>(&I) && cast<BinaryOperator>(&I)->isExact() &&
   2693       !isa<ConstantSDNode>(Op1) &&
   2694       isa<ConstantSDNode>(Op2) && !cast<ConstantSDNode>(Op2)->isNullValue())
   2695     setValue(&I, TLI.BuildExactSDIV(Op1, Op2, getCurDebugLoc(), DAG));
   2696   else
   2697     setValue(&I, DAG.getNode(ISD::SDIV, getCurDebugLoc(), Op1.getValueType(),
   2698                              Op1, Op2));
   2699 }
   2700 
   2701 void SelectionDAGBuilder::visitICmp(const User &I) {
   2702   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
   2703   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
   2704     predicate = IC->getPredicate();
   2705   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
   2706     predicate = ICmpInst::Predicate(IC->getPredicate());
   2707   SDValue Op1 = getValue(I.getOperand(0));
   2708   SDValue Op2 = getValue(I.getOperand(1));
   2709   ISD::CondCode Opcode = getICmpCondCode(predicate);
   2710 
   2711   EVT DestVT = TLI.getValueType(I.getType());
   2712   setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode));
   2713 }
   2714 
   2715 void SelectionDAGBuilder::visitFCmp(const User &I) {
   2716   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
   2717   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
   2718     predicate = FC->getPredicate();
   2719   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
   2720     predicate = FCmpInst::Predicate(FC->getPredicate());
   2721   SDValue Op1 = getValue(I.getOperand(0));
   2722   SDValue Op2 = getValue(I.getOperand(1));
   2723   ISD::CondCode Condition = getFCmpCondCode(predicate);
   2724   if (TM.Options.NoNaNsFPMath)
   2725     Condition = getFCmpCodeWithoutNaN(Condition);
   2726   EVT DestVT = TLI.getValueType(I.getType());
   2727   setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
   2728 }
   2729 
   2730 void SelectionDAGBuilder::visitSelect(const User &I) {
   2731   SmallVector<EVT, 4> ValueVTs;
   2732   ComputeValueVTs(TLI, I.getType(), ValueVTs);
   2733   unsigned NumValues = ValueVTs.size();
   2734   if (NumValues == 0) return;
   2735 
   2736   SmallVector<SDValue, 4> Values(NumValues);
   2737   SDValue Cond     = getValue(I.getOperand(0));
   2738   SDValue TrueVal  = getValue(I.getOperand(1));
   2739   SDValue FalseVal = getValue(I.getOperand(2));
   2740   ISD::NodeType OpCode = Cond.getValueType().isVector() ?
   2741     ISD::VSELECT : ISD::SELECT;
   2742 
   2743   for (unsigned i = 0; i != NumValues; ++i)
   2744     Values[i] = DAG.getNode(OpCode, getCurDebugLoc(),
   2745                             TrueVal.getNode()->getValueType(TrueVal.getResNo()+i),
   2746                             Cond,
   2747                             SDValue(TrueVal.getNode(),
   2748                                     TrueVal.getResNo() + i),
   2749                             SDValue(FalseVal.getNode(),
   2750                                     FalseVal.getResNo() + i));
   2751 
   2752   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
   2753                            DAG.getVTList(&ValueVTs[0], NumValues),
   2754                            &Values[0], NumValues));
   2755 }
   2756 
   2757 void SelectionDAGBuilder::visitTrunc(const User &I) {
   2758   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
   2759   SDValue N = getValue(I.getOperand(0));
   2760   EVT DestVT = TLI.getValueType(I.getType());
   2761   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
   2762 }
   2763 
   2764 void SelectionDAGBuilder::visitZExt(const User &I) {
   2765   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
   2766   // ZExt also can't be a cast to bool for same reason. So, nothing much to do
   2767   SDValue N = getValue(I.getOperand(0));
   2768   EVT DestVT = TLI.getValueType(I.getType());
   2769   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
   2770 }
   2771 
   2772 void SelectionDAGBuilder::visitSExt(const User &I) {
   2773   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
   2774   // SExt also can't be a cast to bool for same reason. So, nothing much to do
   2775   SDValue N = getValue(I.getOperand(0));
   2776   EVT DestVT = TLI.getValueType(I.getType());
   2777   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
   2778 }
   2779 
   2780 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
   2781   // FPTrunc is never a no-op cast, no need to check
   2782   SDValue N = getValue(I.getOperand(0));
   2783   EVT DestVT = TLI.getValueType(I.getType());
   2784   setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
   2785                            DestVT, N,
   2786                            DAG.getTargetConstant(0, TLI.getPointerTy())));
   2787 }
   2788 
   2789 void SelectionDAGBuilder::visitFPExt(const User &I){
   2790   // FPExt is never a no-op cast, no need to check
   2791   SDValue N = getValue(I.getOperand(0));
   2792   EVT DestVT = TLI.getValueType(I.getType());
   2793   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
   2794 }
   2795 
   2796 void SelectionDAGBuilder::visitFPToUI(const User &I) {
   2797   // FPToUI is never a no-op cast, no need to check
   2798   SDValue N = getValue(I.getOperand(0));
   2799   EVT DestVT = TLI.getValueType(I.getType());
   2800   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
   2801 }
   2802 
   2803 void SelectionDAGBuilder::visitFPToSI(const User &I) {
   2804   // FPToSI is never a no-op cast, no need to check
   2805   SDValue N = getValue(I.getOperand(0));
   2806   EVT DestVT = TLI.getValueType(I.getType());
   2807   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
   2808 }
   2809 
   2810 void SelectionDAGBuilder::visitUIToFP(const User &I) {
   2811   // UIToFP is never a no-op cast, no need to check
   2812   SDValue N = getValue(I.getOperand(0));
   2813   EVT DestVT = TLI.getValueType(I.getType());
   2814   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
   2815 }
   2816 
   2817 void SelectionDAGBuilder::visitSIToFP(const User &I){
   2818   // SIToFP is never a no-op cast, no need to check
   2819   SDValue N = getValue(I.getOperand(0));
   2820   EVT DestVT = TLI.getValueType(I.getType());
   2821   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
   2822 }
   2823 
   2824 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
   2825   // What to do depends on the size of the integer and the size of the pointer.
   2826   // We can either truncate, zero extend, or no-op, accordingly.
   2827   SDValue N = getValue(I.getOperand(0));
   2828   EVT DestVT = TLI.getValueType(I.getType());
   2829   setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
   2830 }
   2831 
   2832 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
   2833   // What to do depends on the size of the integer and the size of the pointer.
   2834   // We can either truncate, zero extend, or no-op, accordingly.
   2835   SDValue N = getValue(I.getOperand(0));
   2836   EVT DestVT = TLI.getValueType(I.getType());
   2837   setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
   2838 }
   2839 
   2840 void SelectionDAGBuilder::visitBitCast(const User &I) {
   2841   SDValue N = getValue(I.getOperand(0));
   2842   EVT DestVT = TLI.getValueType(I.getType());
   2843 
   2844   // BitCast assures us that source and destination are the same size so this is
   2845   // either a BITCAST or a no-op.
   2846   if (DestVT != N.getValueType())
   2847     setValue(&I, DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
   2848                              DestVT, N)); // convert types.
   2849   else
   2850     setValue(&I, N);            // noop cast.
   2851 }
   2852 
   2853 void SelectionDAGBuilder::visitInsertElement(const User &I) {
   2854   SDValue InVec = getValue(I.getOperand(0));
   2855   SDValue InVal = getValue(I.getOperand(1));
   2856   SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
   2857                               TLI.getPointerTy(),
   2858                               getValue(I.getOperand(2)));
   2859   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
   2860                            TLI.getValueType(I.getType()),
   2861                            InVec, InVal, InIdx));
   2862 }
   2863 
   2864 void SelectionDAGBuilder::visitExtractElement(const User &I) {
   2865   SDValue InVec = getValue(I.getOperand(0));
   2866   SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
   2867                               TLI.getPointerTy(),
   2868                               getValue(I.getOperand(1)));
   2869   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
   2870                            TLI.getValueType(I.getType()), InVec, InIdx));
   2871 }
   2872 
   2873 // Utility for visitShuffleVector - Return true if every element in Mask,
   2874 // beginning from position Pos and ending in Pos+Size, falls within the
   2875 // specified sequential range [L, L+Pos). or is undef.
   2876 static bool isSequentialInRange(const SmallVectorImpl<int> &Mask,
   2877                                 unsigned Pos, unsigned Size, int Low) {
   2878   for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
   2879     if (Mask[i] >= 0 && Mask[i] != Low)
   2880       return false;
   2881   return true;
   2882 }
   2883 
   2884 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
   2885   SDValue Src1 = getValue(I.getOperand(0));
   2886   SDValue Src2 = getValue(I.getOperand(1));
   2887 
   2888   SmallVector<int, 8> Mask;
   2889   ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
   2890   unsigned MaskNumElts = Mask.size();
   2891 
   2892   EVT VT = TLI.getValueType(I.getType());
   2893   EVT SrcVT = Src1.getValueType();
   2894   unsigned SrcNumElts = SrcVT.getVectorNumElements();
   2895 
   2896   if (SrcNumElts == MaskNumElts) {
   2897     setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
   2898                                       &Mask[0]));
   2899     return;
   2900   }
   2901 
   2902   // Normalize the shuffle vector since mask and vector length don't match.
   2903   if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
   2904     // Mask is longer than the source vectors and is a multiple of the source
   2905     // vectors.  We can use concatenate vector to make the mask and vectors
   2906     // lengths match.
   2907     if (SrcNumElts*2 == MaskNumElts) {
   2908       // First check for Src1 in low and Src2 in high
   2909       if (isSequentialInRange(Mask, 0, SrcNumElts, 0) &&
   2910           isSequentialInRange(Mask, SrcNumElts, SrcNumElts, SrcNumElts)) {
   2911         // The shuffle is concatenating two vectors together.
   2912         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
   2913                                  VT, Src1, Src2));
   2914         return;
   2915       }
   2916       // Then check for Src2 in low and Src1 in high
   2917       if (isSequentialInRange(Mask, 0, SrcNumElts, SrcNumElts) &&
   2918           isSequentialInRange(Mask, SrcNumElts, SrcNumElts, 0)) {
   2919         // The shuffle is concatenating two vectors together.
   2920         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
   2921                                  VT, Src2, Src1));
   2922         return;
   2923       }
   2924     }
   2925 
   2926     // Pad both vectors with undefs to make them the same length as the mask.
   2927     unsigned NumConcat = MaskNumElts / SrcNumElts;
   2928     bool Src1U = Src1.getOpcode() == ISD::UNDEF;
   2929     bool Src2U = Src2.getOpcode() == ISD::UNDEF;
   2930     SDValue UndefVal = DAG.getUNDEF(SrcVT);
   2931 
   2932     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
   2933     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
   2934     MOps1[0] = Src1;
   2935     MOps2[0] = Src2;
   2936 
   2937     Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
   2938                                                   getCurDebugLoc(), VT,
   2939                                                   &MOps1[0], NumConcat);
   2940     Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
   2941                                                   getCurDebugLoc(), VT,
   2942                                                   &MOps2[0], NumConcat);
   2943 
   2944     // Readjust mask for new input vector length.
   2945     SmallVector<int, 8> MappedOps;
   2946     for (unsigned i = 0; i != MaskNumElts; ++i) {
   2947       int Idx = Mask[i];
   2948       if (Idx >= (int)SrcNumElts)
   2949         Idx -= SrcNumElts - MaskNumElts;
   2950       MappedOps.push_back(Idx);
   2951     }
   2952 
   2953     setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
   2954                                       &MappedOps[0]));
   2955     return;
   2956   }
   2957 
   2958   if (SrcNumElts > MaskNumElts) {
   2959     // Analyze the access pattern of the vector to see if we can extract
   2960     // two subvectors and do the shuffle. The analysis is done by calculating
   2961     // the range of elements the mask access on both vectors.
   2962     int MinRange[2] = { static_cast<int>(SrcNumElts),
   2963                         static_cast<int>(SrcNumElts)};
   2964     int MaxRange[2] = {-1, -1};
   2965 
   2966     for (unsigned i = 0; i != MaskNumElts; ++i) {
   2967       int Idx = Mask[i];
   2968       unsigned Input = 0;
   2969       if (Idx < 0)
   2970         continue;
   2971 
   2972       if (Idx >= (int)SrcNumElts) {
   2973         Input = 1;
   2974         Idx -= SrcNumElts;
   2975       }
   2976       if (Idx > MaxRange[Input])
   2977         MaxRange[Input] = Idx;
   2978       if (Idx < MinRange[Input])
   2979         MinRange[Input] = Idx;
   2980     }
   2981 
   2982     // Check if the access is smaller than the vector size and can we find
   2983     // a reasonable extract index.
   2984     int RangeUse[2] = { -1, -1 };  // 0 = Unused, 1 = Extract, -1 = Can not
   2985                                    // Extract.
   2986     int StartIdx[2];  // StartIdx to extract from
   2987     for (unsigned Input = 0; Input < 2; ++Input) {
   2988       if (MinRange[Input] >= (int)SrcNumElts && MaxRange[Input] < 0) {
   2989         RangeUse[Input] = 0; // Unused
   2990         StartIdx[Input] = 0;
   2991         continue;
   2992       }
   2993 
   2994       // Find a good start index that is a multiple of the mask length. Then
   2995       // see if the rest of the elements are in range.
   2996       StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
   2997       if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
   2998           StartIdx[Input] + MaskNumElts <= SrcNumElts)
   2999         RangeUse[Input] = 1; // Extract from a multiple of the mask length.
   3000     }
   3001 
   3002     if (RangeUse[0] == 0 && RangeUse[1] == 0) {
   3003       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
   3004       return;
   3005     }
   3006     if (RangeUse[0] >= 0 && RangeUse[1] >= 0) {
   3007       // Extract appropriate subvector and generate a vector shuffle
   3008       for (unsigned Input = 0; Input < 2; ++Input) {
   3009         SDValue &Src = Input == 0 ? Src1 : Src2;
   3010         if (RangeUse[Input] == 0)
   3011           Src = DAG.getUNDEF(VT);
   3012         else
   3013           Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
   3014                             Src, DAG.getIntPtrConstant(StartIdx[Input]));
   3015       }
   3016 
   3017       // Calculate new mask.
   3018       SmallVector<int, 8> MappedOps;
   3019       for (unsigned i = 0; i != MaskNumElts; ++i) {
   3020         int Idx = Mask[i];
   3021         if (Idx >= 0) {
   3022           if (Idx < (int)SrcNumElts)
   3023             Idx -= StartIdx[0];
   3024           else
   3025             Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
   3026         }
   3027         MappedOps.push_back(Idx);
   3028       }
   3029 
   3030       setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
   3031                                         &MappedOps[0]));
   3032       return;
   3033     }
   3034   }
   3035 
   3036   // We can't use either concat vectors or extract subvectors so fall back to
   3037   // replacing the shuffle with extract and build vector.
   3038   // to insert and build vector.
   3039   EVT EltVT = VT.getVectorElementType();
   3040   EVT PtrVT = TLI.getPointerTy();
   3041   SmallVector<SDValue,8> Ops;
   3042   for (unsigned i = 0; i != MaskNumElts; ++i) {
   3043     int Idx = Mask[i];
   3044     SDValue Res;
   3045 
   3046     if (Idx < 0) {
   3047       Res = DAG.getUNDEF(EltVT);
   3048     } else {
   3049       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
   3050       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
   3051 
   3052       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
   3053                         EltVT, Src, DAG.getConstant(Idx, PtrVT));
   3054     }
   3055 
   3056     Ops.push_back(Res);
   3057   }
   3058 
   3059   setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
   3060                            VT, &Ops[0], Ops.size()));
   3061 }
   3062 
   3063 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
   3064   const Value *Op0 = I.getOperand(0);
   3065   const Value *Op1 = I.getOperand(1);
   3066   Type *AggTy = I.getType();
   3067   Type *ValTy = Op1->getType();
   3068   bool IntoUndef = isa<UndefValue>(Op0);
   3069   bool FromUndef = isa<UndefValue>(Op1);
   3070 
   3071   unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
   3072 
   3073   SmallVector<EVT, 4> AggValueVTs;
   3074   ComputeValueVTs(TLI, AggTy, AggValueVTs);
   3075   SmallVector<EVT, 4> ValValueVTs;
   3076   ComputeValueVTs(TLI, ValTy, ValValueVTs);
   3077 
   3078   unsigned NumAggValues = AggValueVTs.size();
   3079   unsigned NumValValues = ValValueVTs.size();
   3080   SmallVector<SDValue, 4> Values(NumAggValues);
   3081 
   3082   SDValue Agg = getValue(Op0);
   3083   unsigned i = 0;
   3084   // Copy the beginning value(s) from the original aggregate.
   3085   for (; i != LinearIndex; ++i)
   3086     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
   3087                 SDValue(Agg.getNode(), Agg.getResNo() + i);
   3088   // Copy values from the inserted value(s).
   3089   if (NumValValues) {
   3090     SDValue Val = getValue(Op1);
   3091     for (; i != LinearIndex + NumValValues; ++i)
   3092       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
   3093                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
   3094   }
   3095   // Copy remaining value(s) from the original aggregate.
   3096   for (; i != NumAggValues; ++i)
   3097     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
   3098                 SDValue(Agg.getNode(), Agg.getResNo() + i);
   3099 
   3100   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
   3101                            DAG.getVTList(&AggValueVTs[0], NumAggValues),
   3102                            &Values[0], NumAggValues));
   3103 }
   3104 
   3105 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
   3106   const Value *Op0 = I.getOperand(0);
   3107   Type *AggTy = Op0->getType();
   3108   Type *ValTy = I.getType();
   3109   bool OutOfUndef = isa<UndefValue>(Op0);
   3110 
   3111   unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
   3112 
   3113   SmallVector<EVT, 4> ValValueVTs;
   3114   ComputeValueVTs(TLI, ValTy, ValValueVTs);
   3115 
   3116   unsigned NumValValues = ValValueVTs.size();
   3117 
   3118   // Ignore a extractvalue that produces an empty object
   3119   if (!NumValValues) {
   3120     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
   3121     return;
   3122   }
   3123 
   3124   SmallVector<SDValue, 4> Values(NumValValues);
   3125 
   3126   SDValue Agg = getValue(Op0);
   3127   // Copy out the selected value(s).
   3128   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
   3129     Values[i - LinearIndex] =
   3130       OutOfUndef ?
   3131         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
   3132         SDValue(Agg.getNode(), Agg.getResNo() + i);
   3133 
   3134   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
   3135                            DAG.getVTList(&ValValueVTs[0], NumValValues),
   3136                            &Values[0], NumValValues));
   3137 }
   3138 
   3139 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
   3140   SDValue N = getValue(I.getOperand(0));
   3141   // Note that the pointer operand may be a vector of pointers. Take the scalar
   3142   // element which holds a pointer.
   3143   Type *Ty = I.getOperand(0)->getType()->getScalarType();
   3144 
   3145   for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
   3146        OI != E; ++OI) {
   3147     const Value *Idx = *OI;
   3148     if (StructType *StTy = dyn_cast<StructType>(Ty)) {
   3149       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
   3150       if (Field) {
   3151         // N = N + Offset
   3152         uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
   3153         N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
   3154                         DAG.getConstant(Offset, N.getValueType()));
   3155       }
   3156 
   3157       Ty = StTy->getElementType(Field);
   3158     } else {
   3159       Ty = cast<SequentialType>(Ty)->getElementType();
   3160 
   3161       // If this is a constant subscript, handle it quickly.
   3162       if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
   3163         if (CI->isZero()) continue;
   3164         uint64_t Offs =
   3165             TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
   3166         SDValue OffsVal;
   3167         EVT PTy = TLI.getPointerTy();
   3168         unsigned PtrBits = PTy.getSizeInBits();
   3169         if (PtrBits < 64)
   3170           OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
   3171                                 TLI.getPointerTy(),
   3172                                 DAG.getConstant(Offs, MVT::i64));
   3173         else
   3174           OffsVal = DAG.getIntPtrConstant(Offs);
   3175 
   3176         N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
   3177                         OffsVal);
   3178         continue;
   3179       }
   3180 
   3181       // N = N + Idx * ElementSize;
   3182       APInt ElementSize = APInt(TLI.getPointerTy().getSizeInBits(),
   3183                                 TD->getTypeAllocSize(Ty));
   3184       SDValue IdxN = getValue(Idx);
   3185 
   3186       // If the index is smaller or larger than intptr_t, truncate or extend
   3187       // it.
   3188       IdxN = DAG.getSExtOrTrunc(IdxN, getCurDebugLoc(), N.getValueType());
   3189 
   3190       // If this is a multiply by a power of two, turn it into a shl
   3191       // immediately.  This is a very common case.
   3192       if (ElementSize != 1) {
   3193         if (ElementSize.isPowerOf2()) {
   3194           unsigned Amt = ElementSize.logBase2();
   3195           IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
   3196                              N.getValueType(), IdxN,
   3197                              DAG.getConstant(Amt, IdxN.getValueType()));
   3198         } else {
   3199           SDValue Scale = DAG.getConstant(ElementSize, IdxN.getValueType());
   3200           IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
   3201                              N.getValueType(), IdxN, Scale);
   3202         }
   3203       }
   3204 
   3205       N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
   3206                       N.getValueType(), N, IdxN);
   3207     }
   3208   }
   3209 
   3210   setValue(&I, N);
   3211 }
   3212 
   3213 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
   3214   // If this is a fixed sized alloca in the entry block of the function,
   3215   // allocate it statically on the stack.
   3216   if (FuncInfo.StaticAllocaMap.count(&I))
   3217     return;   // getValue will auto-populate this.
   3218 
   3219   Type *Ty = I.getAllocatedType();
   3220   uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
   3221   unsigned Align =
   3222     std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty),
   3223              I.getAlignment());
   3224 
   3225   SDValue AllocSize = getValue(I.getArraySize());
   3226 
   3227   EVT IntPtr = TLI.getPointerTy();
   3228   if (AllocSize.getValueType() != IntPtr)
   3229     AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
   3230 
   3231   AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), IntPtr,
   3232                           AllocSize,
   3233                           DAG.getConstant(TySize, IntPtr));
   3234 
   3235   // Handle alignment.  If the requested alignment is less than or equal to
   3236   // the stack alignment, ignore it.  If the size is greater than or equal to
   3237   // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
   3238   unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
   3239   if (Align <= StackAlign)
   3240     Align = 0;
   3241 
   3242   // Round the size of the allocation up to the stack alignment size
   3243   // by add SA-1 to the size.
   3244   AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
   3245                           AllocSize.getValueType(), AllocSize,
   3246                           DAG.getIntPtrConstant(StackAlign-1));
   3247 
   3248   // Mask out the low bits for alignment purposes.
   3249   AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
   3250                           AllocSize.getValueType(), AllocSize,
   3251                           DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
   3252 
   3253   SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
   3254   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
   3255   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
   3256                             VTs, Ops, 3);
   3257   setValue(&I, DSA);
   3258   DAG.setRoot(DSA.getValue(1));
   3259 
   3260   // Inform the Frame Information that we have just allocated a variable-sized
   3261   // object.
   3262   FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1);
   3263 }
   3264 
   3265 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
   3266   if (I.isAtomic())
   3267     return visitAtomicLoad(I);
   3268 
   3269   const Value *SV = I.getOperand(0);
   3270   SDValue Ptr = getValue(SV);
   3271 
   3272   Type *Ty = I.getType();
   3273 
   3274   bool isVolatile = I.isVolatile();
   3275   bool isNonTemporal = I.getMetadata("nontemporal") != 0;
   3276   bool isInvariant = I.getMetadata("invariant.load") != 0;
   3277   unsigned Alignment = I.getAlignment();
   3278   const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
   3279   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
   3280 
   3281   SmallVector<EVT, 4> ValueVTs;
   3282   SmallVector<uint64_t, 4> Offsets;
   3283   ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
   3284   unsigned NumValues = ValueVTs.size();
   3285   if (NumValues == 0)
   3286     return;
   3287 
   3288   SDValue Root;
   3289   bool ConstantMemory = false;
   3290   if (I.isVolatile() || NumValues > MaxParallelChains)
   3291     // Serialize volatile loads with other side effects.
   3292     Root = getRoot();
   3293   else if (AA->pointsToConstantMemory(
   3294              AliasAnalysis::Location(SV, AA->getTypeStoreSize(Ty), TBAAInfo))) {
   3295     // Do not serialize (non-volatile) loads of constant memory with anything.
   3296     Root = DAG.getEntryNode();
   3297     ConstantMemory = true;
   3298   } else {
   3299     // Do not serialize non-volatile loads against each other.
   3300     Root = DAG.getRoot();
   3301   }
   3302 
   3303   SmallVector<SDValue, 4> Values(NumValues);
   3304   SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
   3305                                           NumValues));
   3306   EVT PtrVT = Ptr.getValueType();
   3307   unsigned ChainI = 0;
   3308   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
   3309     // Serializing loads here may result in excessive register pressure, and
   3310     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
   3311     // could recover a bit by hoisting nodes upward in the chain by recognizing
   3312     // they are side-effect free or do not alias. The optimizer should really
   3313     // avoid this case by converting large object/array copies to llvm.memcpy
   3314     // (MaxParallelChains should always remain as failsafe).
   3315     if (ChainI == MaxParallelChains) {
   3316       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
   3317       SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
   3318                                   MVT::Other, &Chains[0], ChainI);
   3319       Root = Chain;
   3320       ChainI = 0;
   3321     }
   3322     SDValue A = DAG.getNode(ISD::ADD, getCurDebugLoc(),
   3323                             PtrVT, Ptr,
   3324                             DAG.getConstant(Offsets[i], PtrVT));
   3325     SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
   3326                             A, MachinePointerInfo(SV, Offsets[i]), isVolatile,
   3327                             isNonTemporal, isInvariant, Alignment, TBAAInfo,
   3328                             Ranges);
   3329 
   3330     Values[i] = L;
   3331     Chains[ChainI] = L.getValue(1);
   3332   }
   3333 
   3334   if (!ConstantMemory) {
   3335     SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
   3336                                 MVT::Other, &Chains[0], ChainI);
   3337     if (isVolatile)
   3338       DAG.setRoot(Chain);
   3339     else
   3340       PendingLoads.push_back(Chain);
   3341   }
   3342 
   3343   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
   3344                            DAG.getVTList(&ValueVTs[0], NumValues),
   3345                            &Values[0], NumValues));
   3346 }
   3347 
   3348 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
   3349   if (I.isAtomic())
   3350     return visitAtomicStore(I);
   3351 
   3352   const Value *SrcV = I.getOperand(0);
   3353   const Value *PtrV = I.getOperand(1);
   3354 
   3355   SmallVector<EVT, 4> ValueVTs;
   3356   SmallVector<uint64_t, 4> Offsets;
   3357   ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
   3358   unsigned NumValues = ValueVTs.size();
   3359   if (NumValues == 0)
   3360     return;
   3361 
   3362   // Get the lowered operands. Note that we do this after
   3363   // checking if NumResults is zero, because with zero results
   3364   // the operands won't have values in the map.
   3365   SDValue Src = getValue(SrcV);
   3366   SDValue Ptr = getValue(PtrV);
   3367 
   3368   SDValue Root = getRoot();
   3369   SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
   3370                                           NumValues));
   3371   EVT PtrVT = Ptr.getValueType();
   3372   bool isVolatile = I.isVolatile();
   3373   bool isNonTemporal = I.getMetadata("nontemporal") != 0;
   3374   unsigned Alignment = I.getAlignment();
   3375   const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
   3376 
   3377   unsigned ChainI = 0;
   3378   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
   3379     // See visitLoad comments.
   3380     if (ChainI == MaxParallelChains) {
   3381       SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
   3382                                   MVT::Other, &Chains[0], ChainI);
   3383       Root = Chain;
   3384       ChainI = 0;
   3385     }
   3386     SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, Ptr,
   3387                               DAG.getConstant(Offsets[i], PtrVT));
   3388     SDValue St = DAG.getStore(Root, getCurDebugLoc(),
   3389                               SDValue(Src.getNode(), Src.getResNo() + i),
   3390                               Add, MachinePointerInfo(PtrV, Offsets[i]),
   3391                               isVolatile, isNonTemporal, Alignment, TBAAInfo);
   3392     Chains[ChainI] = St;
   3393   }
   3394 
   3395   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
   3396                                   MVT::Other, &Chains[0], ChainI);
   3397   ++SDNodeOrder;
   3398   AssignOrderingToNode(StoreNode.getNode());
   3399   DAG.setRoot(StoreNode);
   3400 }
   3401 
   3402 static SDValue InsertFenceForAtomic(SDValue Chain, AtomicOrdering Order,
   3403                                     SynchronizationScope Scope,
   3404                                     bool Before, DebugLoc dl,
   3405                                     SelectionDAG &DAG,
   3406                                     const TargetLowering &TLI) {
   3407   // Fence, if necessary
   3408   if (Before) {
   3409     if (Order == AcquireRelease || Order == SequentiallyConsistent)
   3410       Order = Release;
   3411     else if (Order == Acquire || Order == Monotonic)
   3412       return Chain;
   3413   } else {
   3414     if (Order == AcquireRelease)
   3415       Order = Acquire;
   3416     else if (Order == Release || Order == Monotonic)
   3417       return Chain;
   3418   }
   3419   SDValue Ops[3];
   3420   Ops[0] = Chain;
   3421   Ops[1] = DAG.getConstant(Order, TLI.getPointerTy());
   3422   Ops[2] = DAG.getConstant(Scope, TLI.getPointerTy());
   3423   return DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3);
   3424 }
   3425 
   3426 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
   3427   DebugLoc dl = getCurDebugLoc();
   3428   AtomicOrdering Order = I.getOrdering();
   3429   SynchronizationScope Scope = I.getSynchScope();
   3430 
   3431   SDValue InChain = getRoot();
   3432 
   3433   if (TLI.getInsertFencesForAtomic())
   3434     InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
   3435                                    DAG, TLI);
   3436 
   3437   SDValue L =
   3438     DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl,
   3439                   getValue(I.getCompareOperand()).getValueType().getSimpleVT(),
   3440                   InChain,
   3441                   getValue(I.getPointerOperand()),
   3442                   getValue(I.getCompareOperand()),
   3443                   getValue(I.getNewValOperand()),
   3444                   MachinePointerInfo(I.getPointerOperand()), 0 /* Alignment */,
   3445                   TLI.getInsertFencesForAtomic() ? Monotonic : Order,
   3446                   Scope);
   3447 
   3448   SDValue OutChain = L.getValue(1);
   3449 
   3450   if (TLI.getInsertFencesForAtomic())
   3451     OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
   3452                                     DAG, TLI);
   3453 
   3454   setValue(&I, L);
   3455   DAG.setRoot(OutChain);
   3456 }
   3457 
   3458 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
   3459   DebugLoc dl = getCurDebugLoc();
   3460   ISD::NodeType NT;
   3461   switch (I.getOperation()) {
   3462   default: llvm_unreachable("Unknown atomicrmw operation");
   3463   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
   3464   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
   3465   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
   3466   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
   3467   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
   3468   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
   3469   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
   3470   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
   3471   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
   3472   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
   3473   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
   3474   }
   3475   AtomicOrdering Order = I.getOrdering();
   3476   SynchronizationScope Scope = I.getSynchScope();
   3477 
   3478   SDValue InChain = getRoot();
   3479 
   3480   if (TLI.getInsertFencesForAtomic())
   3481     InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
   3482                                    DAG, TLI);
   3483 
   3484   SDValue L =
   3485     DAG.getAtomic(NT, dl,
   3486                   getValue(I.getValOperand()).getValueType().getSimpleVT(),
   3487                   InChain,
   3488                   getValue(I.getPointerOperand()),
   3489                   getValue(I.getValOperand()),
   3490                   I.getPointerOperand(), 0 /* Alignment */,
   3491                   TLI.getInsertFencesForAtomic() ? Monotonic : Order,
   3492                   Scope);
   3493 
   3494   SDValue OutChain = L.getValue(1);
   3495 
   3496   if (TLI.getInsertFencesForAtomic())
   3497     OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
   3498                                     DAG, TLI);
   3499 
   3500   setValue(&I, L);
   3501   DAG.setRoot(OutChain);
   3502 }
   3503 
   3504 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
   3505   DebugLoc dl = getCurDebugLoc();
   3506   SDValue Ops[3];
   3507   Ops[0] = getRoot();
   3508   Ops[1] = DAG.getConstant(I.getOrdering(), TLI.getPointerTy());
   3509   Ops[2] = DAG.getConstant(I.getSynchScope(), TLI.getPointerTy());
   3510   DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops, 3));
   3511 }
   3512 
   3513 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
   3514   DebugLoc dl = getCurDebugLoc();
   3515   AtomicOrdering Order = I.getOrdering();
   3516   SynchronizationScope Scope = I.getSynchScope();
   3517 
   3518   SDValue InChain = getRoot();
   3519 
   3520   EVT VT = TLI.getValueType(I.getType());
   3521 
   3522   if (I.getAlignment() < VT.getSizeInBits() / 8)
   3523     report_fatal_error("Cannot generate unaligned atomic load");
   3524 
   3525   SDValue L =
   3526     DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
   3527                   getValue(I.getPointerOperand()),
   3528                   I.getPointerOperand(), I.getAlignment(),
   3529                   TLI.getInsertFencesForAtomic() ? Monotonic : Order,
   3530                   Scope);
   3531 
   3532   SDValue OutChain = L.getValue(1);
   3533 
   3534   if (TLI.getInsertFencesForAtomic())
   3535     OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
   3536                                     DAG, TLI);
   3537 
   3538   setValue(&I, L);
   3539   DAG.setRoot(OutChain);
   3540 }
   3541 
   3542 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
   3543   DebugLoc dl = getCurDebugLoc();
   3544 
   3545   AtomicOrdering Order = I.getOrdering();
   3546   SynchronizationScope Scope = I.getSynchScope();
   3547 
   3548   SDValue InChain = getRoot();
   3549 
   3550   EVT VT = TLI.getValueType(I.getValueOperand()->getType());
   3551 
   3552   if (I.getAlignment() < VT.getSizeInBits() / 8)
   3553     report_fatal_error("Cannot generate unaligned atomic store");
   3554 
   3555   if (TLI.getInsertFencesForAtomic())
   3556     InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
   3557                                    DAG, TLI);
   3558 
   3559   SDValue OutChain =
   3560     DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
   3561                   InChain,
   3562                   getValue(I.getPointerOperand()),
   3563                   getValue(I.getValueOperand()),
   3564                   I.getPointerOperand(), I.getAlignment(),
   3565                   TLI.getInsertFencesForAtomic() ? Monotonic : Order,
   3566                   Scope);
   3567 
   3568   if (TLI.getInsertFencesForAtomic())
   3569     OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
   3570                                     DAG, TLI);
   3571 
   3572   DAG.setRoot(OutChain);
   3573 }
   3574 
   3575 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
   3576 /// node.
   3577 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
   3578                                                unsigned Intrinsic) {
   3579   bool HasChain = !I.doesNotAccessMemory();
   3580   bool OnlyLoad = HasChain && I.onlyReadsMemory();
   3581 
   3582   // Build the operand list.
   3583   SmallVector<SDValue, 8> Ops;
   3584   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
   3585     if (OnlyLoad) {
   3586       // We don't need to serialize loads against other loads.
   3587       Ops.push_back(DAG.getRoot());
   3588     } else {
   3589       Ops.push_back(getRoot());
   3590     }
   3591   }
   3592 
   3593   // Info is set by getTgtMemInstrinsic
   3594   TargetLowering::IntrinsicInfo Info;
   3595   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
   3596 
   3597   // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
   3598   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
   3599       Info.opc == ISD::INTRINSIC_W_CHAIN)
   3600     Ops.push_back(DAG.getTargetConstant(Intrinsic, TLI.getPointerTy()));
   3601 
   3602   // Add all operands of the call to the operand list.
   3603   for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
   3604     SDValue Op = getValue(I.getArgOperand(i));
   3605     Ops.push_back(Op);
   3606   }
   3607 
   3608   SmallVector<EVT, 4> ValueVTs;
   3609   ComputeValueVTs(TLI, I.getType(), ValueVTs);
   3610 
   3611   if (HasChain)
   3612     ValueVTs.push_back(MVT::Other);
   3613 
   3614   SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
   3615 
   3616   // Create the node.
   3617   SDValue Result;
   3618   if (IsTgtIntrinsic) {
   3619     // This is target intrinsic that touches memory
   3620     Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
   3621                                      VTs, &Ops[0], Ops.size(),
   3622                                      Info.memVT,
   3623                                    MachinePointerInfo(Info.ptrVal, Info.offset),
   3624                                      Info.align, Info.vol,
   3625                                      Info.readMem, Info.writeMem);
   3626   } else if (!HasChain) {
   3627     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
   3628                          VTs, &Ops[0], Ops.size());
   3629   } else if (!I.getType()->isVoidTy()) {
   3630     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
   3631                          VTs, &Ops[0], Ops.size());
   3632   } else {
   3633     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
   3634                          VTs, &Ops[0], Ops.size());
   3635   }
   3636 
   3637   if (HasChain) {
   3638     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
   3639     if (OnlyLoad)
   3640       PendingLoads.push_back(Chain);
   3641     else
   3642       DAG.setRoot(Chain);
   3643   }
   3644 
   3645   if (!I.getType()->isVoidTy()) {
   3646     if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
   3647       EVT VT = TLI.getValueType(PTy);
   3648       Result = DAG.getNode(ISD::BITCAST, getCurDebugLoc(), VT, Result);
   3649     }
   3650 
   3651     setValue(&I, Result);
   3652   } else {
   3653     // Assign order to result here. If the intrinsic does not produce a result,
   3654     // it won't be mapped to a SDNode and visit() will not assign it an order
   3655     // number.
   3656     ++SDNodeOrder;
   3657     AssignOrderingToNode(Result.getNode());
   3658   }
   3659 }
   3660 
   3661 /// GetSignificand - Get the significand and build it into a floating-point
   3662 /// number with exponent of 1:
   3663 ///
   3664 ///   Op = (Op & 0x007fffff) | 0x3f800000;
   3665 ///
   3666 /// where Op is the hexadecimal representation of floating point value.
   3667 static SDValue
   3668 GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
   3669   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
   3670                            DAG.getConstant(0x007fffff, MVT::i32));
   3671   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
   3672                            DAG.getConstant(0x3f800000, MVT::i32));
   3673   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
   3674 }
   3675 
   3676 /// GetExponent - Get the exponent:
   3677 ///
   3678 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
   3679 ///
   3680 /// where Op is the hexadecimal representation of floating point value.
   3681 static SDValue
   3682 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
   3683             DebugLoc dl) {
   3684   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
   3685                            DAG.getConstant(0x7f800000, MVT::i32));
   3686   SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
   3687                            DAG.getConstant(23, TLI.getPointerTy()));
   3688   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
   3689                            DAG.getConstant(127, MVT::i32));
   3690   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
   3691 }
   3692 
   3693 /// getF32Constant - Get 32-bit floating point constant.
   3694 static SDValue
   3695 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
   3696   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle, APInt(32, Flt)),
   3697                            MVT::f32);
   3698 }
   3699 
   3700 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
   3701 /// limited-precision mode.
   3702 static SDValue expandExp(DebugLoc dl, SDValue Op, SelectionDAG &DAG,
   3703                          const TargetLowering &TLI) {
   3704   if (Op.getValueType() == MVT::f32 &&
   3705       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
   3706 
   3707     // Put the exponent in the right bit position for later addition to the
   3708     // final result:
   3709     //
   3710     //   #define LOG2OFe 1.4426950f
   3711     //   IntegerPartOfX = ((int32_t)(X * LOG2OFe));
   3712     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
   3713                              getF32Constant(DAG, 0x3fb8aa3b));
   3714     SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
   3715 
   3716     //   FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
   3717     SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
   3718     SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
   3719 
   3720     //   IntegerPartOfX <<= 23;
   3721     IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
   3722                                  DAG.getConstant(23, TLI.getPointerTy()));
   3723 
   3724     SDValue TwoToFracPartOfX;
   3725     if (LimitFloatPrecision <= 6) {
   3726       // For floating-point precision of 6:
   3727       //
   3728       //   TwoToFractionalPartOfX =
   3729       //     0.997535578f +
   3730       //       (0.735607626f + 0.252464424f * x) * x;
   3731       //
   3732       // error 0.0144103317, which is 6 bits
   3733       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3734                                getF32Constant(DAG, 0x3e814304));
   3735       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   3736                                getF32Constant(DAG, 0x3f3c50c8));
   3737       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3738       TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3739                                      getF32Constant(DAG, 0x3f7f5e7e));
   3740     } else if (LimitFloatPrecision <= 12) {
   3741       // For floating-point precision of 12:
   3742       //
   3743       //   TwoToFractionalPartOfX =
   3744       //     0.999892986f +
   3745       //       (0.696457318f +
   3746       //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
   3747       //
   3748       // 0.000107046256 error, which is 13 to 14 bits
   3749       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3750                                getF32Constant(DAG, 0x3da235e3));
   3751       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   3752                                getF32Constant(DAG, 0x3e65b8f3));
   3753       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3754       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3755                                getF32Constant(DAG, 0x3f324b07));
   3756       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   3757       TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   3758                                      getF32Constant(DAG, 0x3f7ff8fd));
   3759     } else { // LimitFloatPrecision <= 18
   3760       // For floating-point precision of 18:
   3761       //
   3762       //   TwoToFractionalPartOfX =
   3763       //     0.999999982f +
   3764       //       (0.693148872f +
   3765       //         (0.240227044f +
   3766       //           (0.554906021e-1f +
   3767       //             (0.961591928e-2f +
   3768       //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
   3769       //
   3770       // error 2.47208000*10^(-7), which is better than 18 bits
   3771       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3772                                getF32Constant(DAG, 0x3924b03e));
   3773       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   3774                                getF32Constant(DAG, 0x3ab24b87));
   3775       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3776       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3777                                getF32Constant(DAG, 0x3c1d8c17));
   3778       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   3779       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   3780                                getF32Constant(DAG, 0x3d634a1d));
   3781       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
   3782       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
   3783                                getF32Constant(DAG, 0x3e75fe14));
   3784       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
   3785       SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
   3786                                 getF32Constant(DAG, 0x3f317234));
   3787       SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
   3788       TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
   3789                                      getF32Constant(DAG, 0x3f800000));
   3790     }
   3791 
   3792     // Add the exponent into the result in integer domain.
   3793     SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFracPartOfX);
   3794     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
   3795                        DAG.getNode(ISD::ADD, dl, MVT::i32,
   3796                                    t13, IntegerPartOfX));
   3797   }
   3798 
   3799   // No special expansion.
   3800   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
   3801 }
   3802 
   3803 /// expandLog - Lower a log intrinsic. Handles the special sequences for
   3804 /// limited-precision mode.
   3805 static SDValue expandLog(DebugLoc dl, SDValue Op, SelectionDAG &DAG,
   3806                          const TargetLowering &TLI) {
   3807   if (Op.getValueType() == MVT::f32 &&
   3808       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
   3809     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
   3810 
   3811     // Scale the exponent by log(2) [0.69314718f].
   3812     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
   3813     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
   3814                                         getF32Constant(DAG, 0x3f317218));
   3815 
   3816     // Get the significand and build it into a floating-point number with
   3817     // exponent of 1.
   3818     SDValue X = GetSignificand(DAG, Op1, dl);
   3819 
   3820     SDValue LogOfMantissa;
   3821     if (LimitFloatPrecision <= 6) {
   3822       // For floating-point precision of 6:
   3823       //
   3824       //   LogofMantissa =
   3825       //     -1.1609546f +
   3826       //       (1.4034025f - 0.23903021f * x) * x;
   3827       //
   3828       // error 0.0034276066, which is better than 8 bits
   3829       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3830                                getF32Constant(DAG, 0xbe74c456));
   3831       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   3832                                getF32Constant(DAG, 0x3fb3a2b1));
   3833       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   3834       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   3835                                   getF32Constant(DAG, 0x3f949a29));
   3836     } else if (LimitFloatPrecision <= 12) {
   3837       // For floating-point precision of 12:
   3838       //
   3839       //   LogOfMantissa =
   3840       //     -1.7417939f +
   3841       //       (2.8212026f +
   3842       //         (-1.4699568f +
   3843       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
   3844       //
   3845       // error 0.000061011436, which is 14 bits
   3846       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3847                                getF32Constant(DAG, 0xbd67b6d6));
   3848       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   3849                                getF32Constant(DAG, 0x3ee4f4b8));
   3850       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   3851       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   3852                                getF32Constant(DAG, 0x3fbc278b));
   3853       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3854       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3855                                getF32Constant(DAG, 0x40348e95));
   3856       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   3857       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
   3858                                   getF32Constant(DAG, 0x3fdef31a));
   3859     } else { // LimitFloatPrecision <= 18
   3860       // For floating-point precision of 18:
   3861       //
   3862       //   LogOfMantissa =
   3863       //     -2.1072184f +
   3864       //       (4.2372794f +
   3865       //         (-3.7029485f +
   3866       //           (2.2781945f +
   3867       //             (-0.87823314f +
   3868       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
   3869       //
   3870       // error 0.0000023660568, which is better than 18 bits
   3871       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3872                                getF32Constant(DAG, 0xbc91e5ac));
   3873       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   3874                                getF32Constant(DAG, 0x3e4350aa));
   3875       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   3876       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   3877                                getF32Constant(DAG, 0x3f60d3e3));
   3878       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3879       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3880                                getF32Constant(DAG, 0x4011cdf0));
   3881       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   3882       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
   3883                                getF32Constant(DAG, 0x406cfd1c));
   3884       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
   3885       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
   3886                                getF32Constant(DAG, 0x408797cb));
   3887       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
   3888       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
   3889                                   getF32Constant(DAG, 0x4006dcab));
   3890     }
   3891 
   3892     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
   3893   }
   3894 
   3895   // No special expansion.
   3896   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
   3897 }
   3898 
   3899 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
   3900 /// limited-precision mode.
   3901 static SDValue expandLog2(DebugLoc dl, SDValue Op, SelectionDAG &DAG,
   3902                           const TargetLowering &TLI) {
   3903   if (Op.getValueType() == MVT::f32 &&
   3904       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
   3905     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
   3906 
   3907     // Get the exponent.
   3908     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
   3909 
   3910     // Get the significand and build it into a floating-point number with
   3911     // exponent of 1.
   3912     SDValue X = GetSignificand(DAG, Op1, dl);
   3913 
   3914     // Different possible minimax approximations of significand in
   3915     // floating-point for various degrees of accuracy over [1,2].
   3916     SDValue Log2ofMantissa;
   3917     if (LimitFloatPrecision <= 6) {
   3918       // For floating-point precision of 6:
   3919       //
   3920       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
   3921       //
   3922       // error 0.0049451742, which is more than 7 bits
   3923       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3924                                getF32Constant(DAG, 0xbeb08fe0));
   3925       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   3926                                getF32Constant(DAG, 0x40019463));
   3927       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   3928       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   3929                                    getF32Constant(DAG, 0x3fd6633d));
   3930     } else if (LimitFloatPrecision <= 12) {
   3931       // For floating-point precision of 12:
   3932       //
   3933       //   Log2ofMantissa =
   3934       //     -2.51285454f +
   3935       //       (4.07009056f +
   3936       //         (-2.12067489f +
   3937       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
   3938       //
   3939       // error 0.0000876136000, which is better than 13 bits
   3940       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3941                                getF32Constant(DAG, 0xbda7262e));
   3942       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   3943                                getF32Constant(DAG, 0x3f25280b));
   3944       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   3945       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   3946                                getF32Constant(DAG, 0x4007b923));
   3947       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3948       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3949                                getF32Constant(DAG, 0x40823e2f));
   3950       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   3951       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
   3952                                    getF32Constant(DAG, 0x4020d29c));
   3953     } else { // LimitFloatPrecision <= 18
   3954       // For floating-point precision of 18:
   3955       //
   3956       //   Log2ofMantissa =
   3957       //     -3.0400495f +
   3958       //       (6.1129976f +
   3959       //         (-5.3420409f +
   3960       //           (3.2865683f +
   3961       //             (-1.2669343f +
   3962       //               (0.27515199f -
   3963       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
   3964       //
   3965       // error 0.0000018516, which is better than 18 bits
   3966       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   3967                                getF32Constant(DAG, 0xbcd2769e));
   3968       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   3969                                getF32Constant(DAG, 0x3e8ce0b9));
   3970       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   3971       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   3972                                getF32Constant(DAG, 0x3fa22ae7));
   3973       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   3974       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   3975                                getF32Constant(DAG, 0x40525723));
   3976       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   3977       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
   3978                                getF32Constant(DAG, 0x40aaf200));
   3979       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
   3980       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
   3981                                getF32Constant(DAG, 0x40c39dad));
   3982       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
   3983       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
   3984                                    getF32Constant(DAG, 0x4042902c));
   3985     }
   3986 
   3987     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
   3988   }
   3989 
   3990   // No special expansion.
   3991   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
   3992 }
   3993 
   3994 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
   3995 /// limited-precision mode.
   3996 static SDValue expandLog10(DebugLoc dl, SDValue Op, SelectionDAG &DAG,
   3997                            const TargetLowering &TLI) {
   3998   if (Op.getValueType() == MVT::f32 &&
   3999       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
   4000     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
   4001 
   4002     // Scale the exponent by log10(2) [0.30102999f].
   4003     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
   4004     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
   4005                                         getF32Constant(DAG, 0x3e9a209a));
   4006 
   4007     // Get the significand and build it into a floating-point number with
   4008     // exponent of 1.
   4009     SDValue X = GetSignificand(DAG, Op1, dl);
   4010 
   4011     SDValue Log10ofMantissa;
   4012     if (LimitFloatPrecision <= 6) {
   4013       // For floating-point precision of 6:
   4014       //
   4015       //   Log10ofMantissa =
   4016       //     -0.50419619f +
   4017       //       (0.60948995f - 0.10380950f * x) * x;
   4018       //
   4019       // error 0.0014886165, which is 6 bits
   4020       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4021                                getF32Constant(DAG, 0xbdd49a13));
   4022       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
   4023                                getF32Constant(DAG, 0x3f1c0789));
   4024       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   4025       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
   4026                                     getF32Constant(DAG, 0x3f011300));
   4027     } else if (LimitFloatPrecision <= 12) {
   4028       // For floating-point precision of 12:
   4029       //
   4030       //   Log10ofMantissa =
   4031       //     -0.64831180f +
   4032       //       (0.91751397f +
   4033       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
   4034       //
   4035       // error 0.00019228036, which is better than 12 bits
   4036       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4037                                getF32Constant(DAG, 0x3d431f31));
   4038       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
   4039                                getF32Constant(DAG, 0x3ea21fb2));
   4040       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   4041       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4042                                getF32Constant(DAG, 0x3f6ae232));
   4043       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4044       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
   4045                                     getF32Constant(DAG, 0x3f25f7c3));
   4046     } else { // LimitFloatPrecision <= 18
   4047       // For floating-point precision of 18:
   4048       //
   4049       //   Log10ofMantissa =
   4050       //     -0.84299375f +
   4051       //       (1.5327582f +
   4052       //         (-1.0688956f +
   4053       //           (0.49102474f +
   4054       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
   4055       //
   4056       // error 0.0000037995730, which is better than 18 bits
   4057       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4058                                getF32Constant(DAG, 0x3c5d51ce));
   4059       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
   4060                                getF32Constant(DAG, 0x3e00685a));
   4061       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
   4062       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4063                                getF32Constant(DAG, 0x3efb6798));
   4064       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4065       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
   4066                                getF32Constant(DAG, 0x3f88d192));
   4067       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   4068       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   4069                                getF32Constant(DAG, 0x3fc4316c));
   4070       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
   4071       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
   4072                                     getF32Constant(DAG, 0x3f57ce70));
   4073     }
   4074 
   4075     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
   4076   }
   4077 
   4078   // No special expansion.
   4079   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
   4080 }
   4081 
   4082 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
   4083 /// limited-precision mode.
   4084 static SDValue expandExp2(DebugLoc dl, SDValue Op, SelectionDAG &DAG,
   4085                           const TargetLowering &TLI) {
   4086   if (Op.getValueType() == MVT::f32 &&
   4087       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
   4088     SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
   4089 
   4090     //   FractionalPartOfX = x - (float)IntegerPartOfX;
   4091     SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
   4092     SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
   4093 
   4094     //   IntegerPartOfX <<= 23;
   4095     IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
   4096                                  DAG.getConstant(23, TLI.getPointerTy()));
   4097 
   4098     SDValue TwoToFractionalPartOfX;
   4099     if (LimitFloatPrecision <= 6) {
   4100       // For floating-point precision of 6:
   4101       //
   4102       //   TwoToFractionalPartOfX =
   4103       //     0.997535578f +
   4104       //       (0.735607626f + 0.252464424f * x) * x;
   4105       //
   4106       // error 0.0144103317, which is 6 bits
   4107       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4108                                getF32Constant(DAG, 0x3e814304));
   4109       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4110                                getF32Constant(DAG, 0x3f3c50c8));
   4111       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4112       TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   4113                                            getF32Constant(DAG, 0x3f7f5e7e));
   4114     } else if (LimitFloatPrecision <= 12) {
   4115       // For floating-point precision of 12:
   4116       //
   4117       //   TwoToFractionalPartOfX =
   4118       //     0.999892986f +
   4119       //       (0.696457318f +
   4120       //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
   4121       //
   4122       // error 0.000107046256, which is 13 to 14 bits
   4123       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4124                                getF32Constant(DAG, 0x3da235e3));
   4125       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4126                                getF32Constant(DAG, 0x3e65b8f3));
   4127       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4128       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   4129                                getF32Constant(DAG, 0x3f324b07));
   4130       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   4131       TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   4132                                            getF32Constant(DAG, 0x3f7ff8fd));
   4133     } else { // LimitFloatPrecision <= 18
   4134       // For floating-point precision of 18:
   4135       //
   4136       //   TwoToFractionalPartOfX =
   4137       //     0.999999982f +
   4138       //       (0.693148872f +
   4139       //         (0.240227044f +
   4140       //           (0.554906021e-1f +
   4141       //             (0.961591928e-2f +
   4142       //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
   4143       // error 2.47208000*10^(-7), which is better than 18 bits
   4144       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4145                                getF32Constant(DAG, 0x3924b03e));
   4146       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4147                                getF32Constant(DAG, 0x3ab24b87));
   4148       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4149       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   4150                                getF32Constant(DAG, 0x3c1d8c17));
   4151       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   4152       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   4153                                getF32Constant(DAG, 0x3d634a1d));
   4154       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
   4155       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
   4156                                getF32Constant(DAG, 0x3e75fe14));
   4157       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
   4158       SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
   4159                                 getF32Constant(DAG, 0x3f317234));
   4160       SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
   4161       TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
   4162                                            getF32Constant(DAG, 0x3f800000));
   4163     }
   4164 
   4165     // Add the exponent into the result in integer domain.
   4166     SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32,
   4167                               TwoToFractionalPartOfX);
   4168     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
   4169                        DAG.getNode(ISD::ADD, dl, MVT::i32,
   4170                                    t13, IntegerPartOfX));
   4171   }
   4172 
   4173   // No special expansion.
   4174   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
   4175 }
   4176 
   4177 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
   4178 /// limited-precision mode with x == 10.0f.
   4179 static SDValue expandPow(DebugLoc dl, SDValue LHS, SDValue RHS,
   4180                          SelectionDAG &DAG, const TargetLowering &TLI) {
   4181   bool IsExp10 = false;
   4182   if (LHS.getValueType() == MVT::f32 && LHS.getValueType() == MVT::f32 &&
   4183       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
   4184     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
   4185       APFloat Ten(10.0f);
   4186       IsExp10 = LHSC->isExactlyValue(Ten);
   4187     }
   4188   }
   4189 
   4190   if (IsExp10) {
   4191     // Put the exponent in the right bit position for later addition to the
   4192     // final result:
   4193     //
   4194     //   #define LOG2OF10 3.3219281f
   4195     //   IntegerPartOfX = (int32_t)(x * LOG2OF10);
   4196     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
   4197                              getF32Constant(DAG, 0x40549a78));
   4198     SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
   4199 
   4200     //   FractionalPartOfX = x - (float)IntegerPartOfX;
   4201     SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
   4202     SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
   4203 
   4204     //   IntegerPartOfX <<= 23;
   4205     IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
   4206                                  DAG.getConstant(23, TLI.getPointerTy()));
   4207 
   4208     SDValue TwoToFractionalPartOfX;
   4209     if (LimitFloatPrecision <= 6) {
   4210       // For floating-point precision of 6:
   4211       //
   4212       //   twoToFractionalPartOfX =
   4213       //     0.997535578f +
   4214       //       (0.735607626f + 0.252464424f * x) * x;
   4215       //
   4216       // error 0.0144103317, which is 6 bits
   4217       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4218                                getF32Constant(DAG, 0x3e814304));
   4219       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4220                                getF32Constant(DAG, 0x3f3c50c8));
   4221       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4222       TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   4223                                            getF32Constant(DAG, 0x3f7f5e7e));
   4224     } else if (LimitFloatPrecision <= 12) {
   4225       // For floating-point precision of 12:
   4226       //
   4227       //   TwoToFractionalPartOfX =
   4228       //     0.999892986f +
   4229       //       (0.696457318f +
   4230       //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
   4231       //
   4232       // error 0.000107046256, which is 13 to 14 bits
   4233       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4234                                getF32Constant(DAG, 0x3da235e3));
   4235       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4236                                getF32Constant(DAG, 0x3e65b8f3));
   4237       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4238       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   4239                                getF32Constant(DAG, 0x3f324b07));
   4240       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   4241       TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   4242                                            getF32Constant(DAG, 0x3f7ff8fd));
   4243     } else { // LimitFloatPrecision <= 18
   4244       // For floating-point precision of 18:
   4245       //
   4246       //   TwoToFractionalPartOfX =
   4247       //     0.999999982f +
   4248       //       (0.693148872f +
   4249       //         (0.240227044f +
   4250       //           (0.554906021e-1f +
   4251       //             (0.961591928e-2f +
   4252       //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
   4253       // error 2.47208000*10^(-7), which is better than 18 bits
   4254       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
   4255                                getF32Constant(DAG, 0x3924b03e));
   4256       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
   4257                                getF32Constant(DAG, 0x3ab24b87));
   4258       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
   4259       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
   4260                                getF32Constant(DAG, 0x3c1d8c17));
   4261       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
   4262       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
   4263                                getF32Constant(DAG, 0x3d634a1d));
   4264       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
   4265       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
   4266                                getF32Constant(DAG, 0x3e75fe14));
   4267       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
   4268       SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
   4269                                 getF32Constant(DAG, 0x3f317234));
   4270       SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
   4271       TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
   4272                                            getF32Constant(DAG, 0x3f800000));
   4273     }
   4274 
   4275     SDValue t13 = DAG.getNode(ISD::BITCAST, dl,MVT::i32,TwoToFractionalPartOfX);
   4276     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
   4277                        DAG.getNode(ISD::ADD, dl, MVT::i32,
   4278                                    t13, IntegerPartOfX));
   4279   }
   4280 
   4281   // No special expansion.
   4282   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
   4283 }
   4284 
   4285 
   4286 /// ExpandPowI - Expand a llvm.powi intrinsic.
   4287 static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS,
   4288                           SelectionDAG &DAG) {
   4289   // If RHS is a constant, we can expand this out to a multiplication tree,
   4290   // otherwise we end up lowering to a call to __powidf2 (for example).  When
   4291   // optimizing for size, we only want to do this if the expansion would produce
   4292   // a small number of multiplies, otherwise we do the full expansion.
   4293   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
   4294     // Get the exponent as a positive value.
   4295     unsigned Val = RHSC->getSExtValue();
   4296     if ((int)Val < 0) Val = -Val;
   4297 
   4298     // powi(x, 0) -> 1.0
   4299     if (Val == 0)
   4300       return DAG.getConstantFP(1.0, LHS.getValueType());
   4301 
   4302     const Function *F = DAG.getMachineFunction().getFunction();
   4303     if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
   4304                                          Attribute::OptimizeForSize) ||
   4305         // If optimizing for size, don't insert too many multiplies.  This
   4306         // inserts up to 5 multiplies.
   4307         CountPopulation_32(Val)+Log2_32(Val) < 7) {
   4308       // We use the simple binary decomposition method to generate the multiply
   4309       // sequence.  There are more optimal ways to do this (for example,
   4310       // powi(x,15) generates one more multiply than it should), but this has
   4311       // the benefit of being both really simple and much better than a libcall.
   4312       SDValue Res;  // Logically starts equal to 1.0
   4313       SDValue CurSquare = LHS;
   4314       while (Val) {
   4315         if (Val & 1) {
   4316           if (Res.getNode())
   4317             Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
   4318           else
   4319             Res = CurSquare;  // 1.0*CurSquare.
   4320         }
   4321 
   4322         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
   4323                                 CurSquare, CurSquare);
   4324         Val >>= 1;
   4325       }
   4326 
   4327       // If the original was negative, invert the result, producing 1/(x*x*x).
   4328       if (RHSC->getSExtValue() < 0)
   4329         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
   4330                           DAG.getConstantFP(1.0, LHS.getValueType()), Res);
   4331       return Res;
   4332     }
   4333   }
   4334 
   4335   // Otherwise, expand to a libcall.
   4336   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
   4337 }
   4338 
   4339 // getTruncatedArgReg - Find underlying register used for an truncated
   4340 // argument.
   4341 static unsigned getTruncatedArgReg(const SDValue &N) {
   4342   if (N.getOpcode() != ISD::TRUNCATE)
   4343     return 0;
   4344 
   4345   const SDValue &Ext = N.getOperand(0);
   4346   if (Ext.getOpcode() == ISD::AssertZext || Ext.getOpcode() == ISD::AssertSext){
   4347     const SDValue &CFR = Ext.getOperand(0);
   4348     if (CFR.getOpcode() == ISD::CopyFromReg)
   4349       return cast<RegisterSDNode>(CFR.getOperand(1))->getReg();
   4350     if (CFR.getOpcode() == ISD::TRUNCATE)
   4351       return getTruncatedArgReg(CFR);
   4352   }
   4353   return 0;
   4354 }
   4355 
   4356 /// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function
   4357 /// argument, create the corresponding DBG_VALUE machine instruction for it now.
   4358 /// At the end of instruction selection, they will be inserted to the entry BB.
   4359 bool
   4360 SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
   4361                                               int64_t Offset,
   4362                                               const SDValue &N) {
   4363   const Argument *Arg = dyn_cast<Argument>(V);
   4364   if (!Arg)
   4365     return false;
   4366 
   4367   MachineFunction &MF = DAG.getMachineFunction();
   4368   const TargetInstrInfo *TII = DAG.getTarget().getInstrInfo();
   4369   const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
   4370 
   4371   // Ignore inlined function arguments here.
   4372   DIVariable DV(Variable);
   4373   if (DV.isInlinedFnArgument(MF.getFunction()))
   4374     return false;
   4375 
   4376   unsigned Reg = 0;
   4377   // Some arguments' frame index is recorded during argument lowering.
   4378   Offset = FuncInfo.getArgumentFrameIndex(Arg);
   4379   if (Offset)
   4380     Reg = TRI->getFrameRegister(MF);
   4381 
   4382   if (!Reg && N.getNode()) {
   4383     if (N.getOpcode() == ISD::CopyFromReg)
   4384       Reg = cast<RegisterSDNode>(N.getOperand(1))->getReg();
   4385     else
   4386       Reg = getTruncatedArgReg(N);
   4387     if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
   4388       MachineRegisterInfo &RegInfo = MF.getRegInfo();
   4389       unsigned PR = RegInfo.getLiveInPhysReg(Reg);
   4390       if (PR)
   4391         Reg = PR;
   4392     }
   4393   }
   4394 
   4395   if (!Reg) {
   4396     // Check if ValueMap has reg number.
   4397     DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
   4398     if (VMI != FuncInfo.ValueMap.end())
   4399       Reg = VMI->second;
   4400   }
   4401 
   4402   if (!Reg && N.getNode()) {
   4403     // Check if frame index is available.
   4404     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
   4405       if (FrameIndexSDNode *FINode =
   4406           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) {
   4407         Reg = TRI->getFrameRegister(MF);
   4408         Offset = FINode->getIndex();
   4409       }
   4410   }
   4411 
   4412   if (!Reg)
   4413     return false;
   4414 
   4415   MachineInstrBuilder MIB = BuildMI(MF, getCurDebugLoc(),
   4416                                     TII->get(TargetOpcode::DBG_VALUE))
   4417     .addReg(Reg, RegState::Debug).addImm(Offset).addMetadata(Variable);
   4418   FuncInfo.ArgDbgValues.push_back(&*MIB);
   4419   return true;
   4420 }
   4421 
   4422 // VisualStudio defines setjmp as _setjmp
   4423 #if defined(_MSC_VER) && defined(setjmp) && \
   4424                          !defined(setjmp_undefined_for_msvc)
   4425 #  pragma push_macro("setjmp")
   4426 #  undef setjmp
   4427 #  define setjmp_undefined_for_msvc
   4428 #endif
   4429 
   4430 /// visitIntrinsicCall - Lower the call to the specified intrinsic function.  If
   4431 /// we want to emit this as a call to a named external function, return the name
   4432 /// otherwise lower it and return null.
   4433 const char *
   4434 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
   4435   DebugLoc dl = getCurDebugLoc();
   4436   SDValue Res;
   4437 
   4438   switch (Intrinsic) {
   4439   default:
   4440     // By default, turn this into a target intrinsic node.
   4441     visitTargetIntrinsic(I, Intrinsic);
   4442     return 0;
   4443   case Intrinsic::vastart:  visitVAStart(I); return 0;
   4444   case Intrinsic::vaend:    visitVAEnd(I); return 0;
   4445   case Intrinsic::vacopy:   visitVACopy(I); return 0;
   4446   case Intrinsic::returnaddress:
   4447     setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
   4448                              getValue(I.getArgOperand(0))));
   4449     return 0;
   4450   case Intrinsic::frameaddress:
   4451     setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
   4452                              getValue(I.getArgOperand(0))));
   4453     return 0;
   4454   case Intrinsic::setjmp:
   4455     return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
   4456   case Intrinsic::longjmp:
   4457     return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
   4458   case Intrinsic::memcpy: {
   4459     // Assert for address < 256 since we support only user defined address
   4460     // spaces.
   4461     assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
   4462            < 256 &&
   4463            cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
   4464            < 256 &&
   4465            "Unknown address space");
   4466     SDValue Op1 = getValue(I.getArgOperand(0));
   4467     SDValue Op2 = getValue(I.getArgOperand(1));
   4468     SDValue Op3 = getValue(I.getArgOperand(2));
   4469     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
   4470     if (!Align)
   4471       Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment.
   4472     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
   4473     DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, isVol, false,
   4474                               MachinePointerInfo(I.getArgOperand(0)),
   4475                               MachinePointerInfo(I.getArgOperand(1))));
   4476     return 0;
   4477   }
   4478   case Intrinsic::memset: {
   4479     // Assert for address < 256 since we support only user defined address
   4480     // spaces.
   4481     assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
   4482            < 256 &&
   4483            "Unknown address space");
   4484     SDValue Op1 = getValue(I.getArgOperand(0));
   4485     SDValue Op2 = getValue(I.getArgOperand(1));
   4486     SDValue Op3 = getValue(I.getArgOperand(2));
   4487     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
   4488     if (!Align)
   4489       Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment.
   4490     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
   4491     DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
   4492                               MachinePointerInfo(I.getArgOperand(0))));
   4493     return 0;
   4494   }
   4495   case Intrinsic::memmove: {
   4496     // Assert for address < 256 since we support only user defined address
   4497     // spaces.
   4498     assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
   4499            < 256 &&
   4500            cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
   4501            < 256 &&
   4502            "Unknown address space");
   4503     SDValue Op1 = getValue(I.getArgOperand(0));
   4504     SDValue Op2 = getValue(I.getArgOperand(1));
   4505     SDValue Op3 = getValue(I.getArgOperand(2));
   4506     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
   4507     if (!Align)
   4508       Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment.
   4509     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
   4510     DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align, isVol,
   4511                                MachinePointerInfo(I.getArgOperand(0)),
   4512                                MachinePointerInfo(I.getArgOperand(1))));
   4513     return 0;
   4514   }
   4515   case Intrinsic::dbg_declare: {
   4516     const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
   4517     MDNode *Variable = DI.getVariable();
   4518     const Value *Address = DI.getAddress();
   4519     if (!Address || !DIVariable(Variable).Verify()) {
   4520       DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
   4521       return 0;
   4522     }
   4523 
   4524     // Build an entry in DbgOrdering.  Debug info input nodes get an SDNodeOrder
   4525     // but do not always have a corresponding SDNode built.  The SDNodeOrder
   4526     // absolute, but not relative, values are different depending on whether
   4527     // debug info exists.
   4528     ++SDNodeOrder;
   4529 
   4530     // Check if address has undef value.
   4531     if (isa<UndefValue>(Address) ||
   4532         (Address->use_empty() && !isa<Argument>(Address))) {
   4533       DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
   4534       return 0;
   4535     }
   4536 
   4537     SDValue &N = NodeMap[Address];
   4538     if (!N.getNode() && isa<Argument>(Address))
   4539       // Check unused arguments map.
   4540       N = UnusedArgNodeMap[Address];
   4541     SDDbgValue *SDV;
   4542     if (N.getNode()) {
   4543       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
   4544         Address = BCI->getOperand(0);
   4545       // Parameters are handled specially.
   4546       bool isParameter =
   4547         (DIVariable(Variable).getTag() == dwarf::DW_TAG_arg_variable ||
   4548          isa<Argument>(Address));
   4549 
   4550       const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
   4551 
   4552       if (isParameter && !AI) {
   4553         FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
   4554         if (FINode)
   4555           // Byval parameter.  We have a frame index at this point.
   4556           SDV = DAG.getDbgValue(Variable, FINode->getIndex(),
   4557                                 0, dl, SDNodeOrder);
   4558         else {
   4559           // Address is an argument, so try to emit its dbg value using
   4560           // virtual register info from the FuncInfo.ValueMap.
   4561           EmitFuncArgumentDbgValue(Address, Variable, 0, N);
   4562           return 0;
   4563         }
   4564       } else if (AI)
   4565         SDV = DAG.getDbgValue(Variable, N.getNode(), N.getResNo(),
   4566                               0, dl, SDNodeOrder);
   4567       else {
   4568         // Can't do anything with other non-AI cases yet.
   4569         DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
   4570         DEBUG(dbgs() << "non-AllocaInst issue for Address: \n\t");
   4571         DEBUG(Address->dump());
   4572         return 0;
   4573       }
   4574       DAG.AddDbgValue(SDV, N.getNode(), isParameter);
   4575     } else {
   4576       // If Address is an argument then try to emit its dbg value using
   4577       // virtual register info from the FuncInfo.ValueMap.
   4578       if (!EmitFuncArgumentDbgValue(Address, Variable, 0, N)) {
   4579         // If variable is pinned by a alloca in dominating bb then
   4580         // use StaticAllocaMap.
   4581         if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
   4582           if (AI->getParent() != DI.getParent()) {
   4583             DenseMap<const AllocaInst*, int>::iterator SI =
   4584               FuncInfo.StaticAllocaMap.find(AI);
   4585             if (SI != FuncInfo.StaticAllocaMap.end()) {
   4586               SDV = DAG.getDbgValue(Variable, SI->second,
   4587                                     0, dl, SDNodeOrder);
   4588               DAG.AddDbgValue(SDV, 0, false);
   4589               return 0;
   4590             }
   4591           }
   4592         }
   4593         DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
   4594       }
   4595     }
   4596     return 0;
   4597   }
   4598   case Intrinsic::dbg_value: {
   4599     const DbgValueInst &DI = cast<DbgValueInst>(I);
   4600     if (!DIVariable(DI.getVariable()).Verify())
   4601       return 0;
   4602 
   4603     MDNode *Variable = DI.getVariable();
   4604     uint64_t Offset = DI.getOffset();
   4605     const Value *V = DI.getValue();
   4606     if (!V)
   4607       return 0;
   4608 
   4609     // Build an entry in DbgOrdering.  Debug info input nodes get an SDNodeOrder
   4610     // but do not always have a corresponding SDNode built.  The SDNodeOrder
   4611     // absolute, but not relative, values are different depending on whether
   4612     // debug info exists.
   4613     ++SDNodeOrder;
   4614     SDDbgValue *SDV;
   4615     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
   4616       SDV = DAG.getDbgValue(Variable, V, Offset, dl, SDNodeOrder);
   4617       DAG.AddDbgValue(SDV, 0, false);
   4618     } else {
   4619       // Do not use getValue() in here; we don't want to generate code at
   4620       // this point if it hasn't been done yet.
   4621       SDValue N = NodeMap[V];
   4622       if (!N.getNode() && isa<Argument>(V))
   4623         // Check unused arguments map.
   4624         N = UnusedArgNodeMap[V];
   4625       if (N.getNode()) {
   4626         if (!EmitFuncArgumentDbgValue(V, Variable, Offset, N)) {
   4627           SDV = DAG.getDbgValue(Variable, N.getNode(),
   4628                                 N.getResNo(), Offset, dl, SDNodeOrder);
   4629           DAG.AddDbgValue(SDV, N.getNode(), false);
   4630         }
   4631       } else if (!V->use_empty() ) {
   4632         // Do not call getValue(V) yet, as we don't want to generate code.
   4633         // Remember it for later.
   4634         DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
   4635         DanglingDebugInfoMap[V] = DDI;
   4636       } else {
   4637         // We may expand this to cover more cases.  One case where we have no
   4638         // data available is an unreferenced parameter.
   4639         DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
   4640       }
   4641     }
   4642 
   4643     // Build a debug info table entry.
   4644     if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
   4645       V = BCI->getOperand(0);
   4646     const AllocaInst *AI = dyn_cast<AllocaInst>(V);
   4647     // Don't handle byval struct arguments or VLAs, for example.
   4648     if (!AI) {
   4649       DEBUG(dbgs() << "Dropping debug location info for:\n  " << DI << "\n");
   4650       DEBUG(dbgs() << "  Last seen at:\n    " << *V << "\n");
   4651       return 0;
   4652     }
   4653     DenseMap<const AllocaInst*, int>::iterator SI =
   4654       FuncInfo.StaticAllocaMap.find(AI);
   4655     if (SI == FuncInfo.StaticAllocaMap.end())
   4656       return 0; // VLAs.
   4657     int FI = SI->second;
   4658 
   4659     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
   4660     if (!DI.getDebugLoc().isUnknown() && MMI.hasDebugInfo())
   4661       MMI.setVariableDbgInfo(Variable, FI, DI.getDebugLoc());
   4662     return 0;
   4663   }
   4664 
   4665   case Intrinsic::eh_typeid_for: {
   4666     // Find the type id for the given typeinfo.
   4667     GlobalVariable *GV = ExtractTypeInfo(I.getArgOperand(0));
   4668     unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV);
   4669     Res = DAG.getConstant(TypeID, MVT::i32);
   4670     setValue(&I, Res);
   4671     return 0;
   4672   }
   4673 
   4674   case Intrinsic::eh_return_i32:
   4675   case Intrinsic::eh_return_i64:
   4676     DAG.getMachineFunction().getMMI().setCallsEHReturn(true);
   4677     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
   4678                             MVT::Other,
   4679                             getControlRoot(),
   4680                             getValue(I.getArgOperand(0)),
   4681                             getValue(I.getArgOperand(1))));
   4682     return 0;
   4683   case Intrinsic::eh_unwind_init:
   4684     DAG.getMachineFunction().getMMI().setCallsUnwindInit(true);
   4685     return 0;
   4686   case Intrinsic::eh_dwarf_cfa: {
   4687     SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), dl,
   4688                                         TLI.getPointerTy());
   4689     SDValue Offset = DAG.getNode(ISD::ADD, dl,
   4690                                  TLI.getPointerTy(),
   4691                                  DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
   4692                                              TLI.getPointerTy()),
   4693                                  CfaArg);
   4694     SDValue FA = DAG.getNode(ISD::FRAMEADDR, dl,
   4695                              TLI.getPointerTy(),
   4696                              DAG.getConstant(0, TLI.getPointerTy()));
   4697     setValue(&I, DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
   4698                              FA, Offset));
   4699     return 0;
   4700   }
   4701   case Intrinsic::eh_sjlj_callsite: {
   4702     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
   4703     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
   4704     assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
   4705     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
   4706 
   4707     MMI.setCurrentCallSite(CI->getZExtValue());
   4708     return 0;
   4709   }
   4710   case Intrinsic::eh_sjlj_functioncontext: {
   4711     // Get and store the index of the function context.
   4712     MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
   4713     AllocaInst *FnCtx =
   4714       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
   4715     int FI = FuncInfo.StaticAllocaMap[FnCtx];
   4716     MFI->setFunctionContextIndex(FI);
   4717     return 0;
   4718   }
   4719   case Intrinsic::eh_sjlj_setjmp: {
   4720     SDValue Ops[2];
   4721     Ops[0] = getRoot();
   4722     Ops[1] = getValue(I.getArgOperand(0));
   4723     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, dl,
   4724                              DAG.getVTList(MVT::i32, MVT::Other),
   4725                              Ops, 2);
   4726     setValue(&I, Op.getValue(0));
   4727     DAG.setRoot(Op.getValue(1));
   4728     return 0;
   4729   }
   4730   case Intrinsic::eh_sjlj_longjmp: {
   4731     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, dl, MVT::Other,
   4732                             getRoot(), getValue(I.getArgOperand(0))));
   4733     return 0;
   4734   }
   4735 
   4736   case Intrinsic::x86_mmx_pslli_w:
   4737   case Intrinsic::x86_mmx_pslli_d:
   4738   case Intrinsic::x86_mmx_pslli_q:
   4739   case Intrinsic::x86_mmx_psrli_w:
   4740   case Intrinsic::x86_mmx_psrli_d:
   4741   case Intrinsic::x86_mmx_psrli_q:
   4742   case Intrinsic::x86_mmx_psrai_w:
   4743   case Intrinsic::x86_mmx_psrai_d: {
   4744     SDValue ShAmt = getValue(I.getArgOperand(1));
   4745     if (isa<ConstantSDNode>(ShAmt)) {
   4746       visitTargetIntrinsic(I, Intrinsic);
   4747       return 0;
   4748     }
   4749     unsigned NewIntrinsic = 0;
   4750     EVT ShAmtVT = MVT::v2i32;
   4751     switch (Intrinsic) {
   4752     case Intrinsic::x86_mmx_pslli_w:
   4753       NewIntrinsic = Intrinsic::x86_mmx_psll_w;
   4754       break;
   4755     case Intrinsic::x86_mmx_pslli_d:
   4756       NewIntrinsic = Intrinsic::x86_mmx_psll_d;
   4757       break;
   4758     case Intrinsic::x86_mmx_pslli_q:
   4759       NewIntrinsic = Intrinsic::x86_mmx_psll_q;
   4760       break;
   4761     case Intrinsic::x86_mmx_psrli_w:
   4762       NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
   4763       break;
   4764     case Intrinsic::x86_mmx_psrli_d:
   4765       NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
   4766       break;
   4767     case Intrinsic::x86_mmx_psrli_q:
   4768       NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
   4769       break;
   4770     case Intrinsic::x86_mmx_psrai_w:
   4771       NewIntrinsic = Intrinsic::x86_mmx_psra_w;
   4772       break;
   4773     case Intrinsic::x86_mmx_psrai_d:
   4774       NewIntrinsic = Intrinsic::x86_mmx_psra_d;
   4775       break;
   4776     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
   4777     }
   4778 
   4779     // The vector shift intrinsics with scalars uses 32b shift amounts but
   4780     // the sse2/mmx shift instructions reads 64 bits. Set the upper 32 bits
   4781     // to be zero.
   4782     // We must do this early because v2i32 is not a legal type.
   4783     SDValue ShOps[2];
   4784     ShOps[0] = ShAmt;
   4785     ShOps[1] = DAG.getConstant(0, MVT::i32);
   4786     ShAmt =  DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2);
   4787     EVT DestVT = TLI.getValueType(I.getType());
   4788     ShAmt = DAG.getNode(ISD::BITCAST, dl, DestVT, ShAmt);
   4789     Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
   4790                        DAG.getConstant(NewIntrinsic, MVT::i32),
   4791                        getValue(I.getArgOperand(0)), ShAmt);
   4792     setValue(&I, Res);
   4793     return 0;
   4794   }
   4795   case Intrinsic::x86_avx_vinsertf128_pd_256:
   4796   case Intrinsic::x86_avx_vinsertf128_ps_256:
   4797   case Intrinsic::x86_avx_vinsertf128_si_256:
   4798   case Intrinsic::x86_avx2_vinserti128: {
   4799     EVT DestVT = TLI.getValueType(I.getType());
   4800     EVT ElVT = TLI.getValueType(I.getArgOperand(1)->getType());
   4801     uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue() & 1) *
   4802                    ElVT.getVectorNumElements();
   4803     Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, DestVT,
   4804                       getValue(I.getArgOperand(0)),
   4805                       getValue(I.getArgOperand(1)),
   4806                       DAG.getIntPtrConstant(Idx));
   4807     setValue(&I, Res);
   4808     return 0;
   4809   }
   4810   case Intrinsic::x86_avx_vextractf128_pd_256:
   4811   case Intrinsic::x86_avx_vextractf128_ps_256:
   4812   case Intrinsic::x86_avx_vextractf128_si_256:
   4813   case Intrinsic::x86_avx2_vextracti128: {
   4814     EVT DestVT = TLI.getValueType(I.getType());
   4815     uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(1))->getZExtValue() & 1) *
   4816                    DestVT.getVectorNumElements();
   4817     Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT,
   4818                       getValue(I.getArgOperand(0)),
   4819                       DAG.getIntPtrConstant(Idx));
   4820     setValue(&I, Res);
   4821     return 0;
   4822   }
   4823   case Intrinsic::convertff:
   4824   case Intrinsic::convertfsi:
   4825   case Intrinsic::convertfui:
   4826   case Intrinsic::convertsif:
   4827   case Intrinsic::convertuif:
   4828   case Intrinsic::convertss:
   4829   case Intrinsic::convertsu:
   4830   case Intrinsic::convertus:
   4831   case Intrinsic::convertuu: {
   4832     ISD::CvtCode Code = ISD::CVT_INVALID;
   4833     switch (Intrinsic) {
   4834     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
   4835     case Intrinsic::convertff:  Code = ISD::CVT_FF; break;
   4836     case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
   4837     case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
   4838     case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
   4839     case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
   4840     case Intrinsic::convertss:  Code = ISD::CVT_SS; break;
   4841     case Intrinsic::convertsu:  Code = ISD::CVT_SU; break;
   4842     case Intrinsic::convertus:  Code = ISD::CVT_US; break;
   4843     case Intrinsic::convertuu:  Code = ISD::CVT_UU; break;
   4844     }
   4845     EVT DestVT = TLI.getValueType(I.getType());
   4846     const Value *Op1 = I.getArgOperand(0);
   4847     Res = DAG.getConvertRndSat(DestVT, dl, getValue(Op1),
   4848                                DAG.getValueType(DestVT),
   4849                                DAG.getValueType(getValue(Op1).getValueType()),
   4850                                getValue(I.getArgOperand(1)),
   4851                                getValue(I.getArgOperand(2)),
   4852                                Code);
   4853     setValue(&I, Res);
   4854     return 0;
   4855   }
   4856   case Intrinsic::powi:
   4857     setValue(&I, ExpandPowI(dl, getValue(I.getArgOperand(0)),
   4858                             getValue(I.getArgOperand(1)), DAG));
   4859     return 0;
   4860   case Intrinsic::log:
   4861     setValue(&I, expandLog(dl, getValue(I.getArgOperand(0)), DAG, TLI));
   4862     return 0;
   4863   case Intrinsic::log2:
   4864     setValue(&I, expandLog2(dl, getValue(I.getArgOperand(0)), DAG, TLI));
   4865     return 0;
   4866   case Intrinsic::log10:
   4867     setValue(&I, expandLog10(dl, getValue(I.getArgOperand(0)), DAG, TLI));
   4868     return 0;
   4869   case Intrinsic::exp:
   4870     setValue(&I, expandExp(dl, getValue(I.getArgOperand(0)), DAG, TLI));
   4871     return 0;
   4872   case Intrinsic::exp2:
   4873     setValue(&I, expandExp2(dl, getValue(I.getArgOperand(0)), DAG, TLI));
   4874     return 0;
   4875   case Intrinsic::pow:
   4876     setValue(&I, expandPow(dl, getValue(I.getArgOperand(0)),
   4877                            getValue(I.getArgOperand(1)), DAG, TLI));
   4878     return 0;
   4879   case Intrinsic::sqrt:
   4880   case Intrinsic::fabs:
   4881   case Intrinsic::sin:
   4882   case Intrinsic::cos:
   4883   case Intrinsic::floor:
   4884   case Intrinsic::ceil:
   4885   case Intrinsic::trunc:
   4886   case Intrinsic::rint:
   4887   case Intrinsic::nearbyint: {
   4888     unsigned Opcode;
   4889     switch (Intrinsic) {
   4890     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
   4891     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
   4892     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
   4893     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
   4894     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
   4895     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
   4896     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
   4897     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
   4898     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
   4899     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
   4900     }
   4901 
   4902     setValue(&I, DAG.getNode(Opcode, dl,
   4903                              getValue(I.getArgOperand(0)).getValueType(),
   4904                              getValue(I.getArgOperand(0))));
   4905     return 0;
   4906   }
   4907   case Intrinsic::fma:
   4908     setValue(&I, DAG.getNode(ISD::FMA, dl,
   4909                              getValue(I.getArgOperand(0)).getValueType(),
   4910                              getValue(I.getArgOperand(0)),
   4911                              getValue(I.getArgOperand(1)),
   4912                              getValue(I.getArgOperand(2))));
   4913     return 0;
   4914   case Intrinsic::fmuladd: {
   4915     EVT VT = TLI.getValueType(I.getType());
   4916     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
   4917         TLI.isOperationLegalOrCustom(ISD::FMA, VT) &&
   4918         TLI.isFMAFasterThanMulAndAdd(VT)){
   4919       setValue(&I, DAG.getNode(ISD::FMA, dl,
   4920                                getValue(I.getArgOperand(0)).getValueType(),
   4921                                getValue(I.getArgOperand(0)),
   4922                                getValue(I.getArgOperand(1)),
   4923                                getValue(I.getArgOperand(2))));
   4924     } else {
   4925       SDValue Mul = DAG.getNode(ISD::FMUL, dl,
   4926                                 getValue(I.getArgOperand(0)).getValueType(),
   4927                                 getValue(I.getArgOperand(0)),
   4928                                 getValue(I.getArgOperand(1)));
   4929       SDValue Add = DAG.getNode(ISD::FADD, dl,
   4930                                 getValue(I.getArgOperand(0)).getValueType(),
   4931                                 Mul,
   4932                                 getValue(I.getArgOperand(2)));
   4933       setValue(&I, Add);
   4934     }
   4935     return 0;
   4936   }
   4937   case Intrinsic::convert_to_fp16:
   4938     setValue(&I, DAG.getNode(ISD::FP32_TO_FP16, dl,
   4939                              MVT::i16, getValue(I.getArgOperand(0))));
   4940     return 0;
   4941   case Intrinsic::convert_from_fp16:
   4942     setValue(&I, DAG.getNode(ISD::FP16_TO_FP32, dl,
   4943                              MVT::f32, getValue(I.getArgOperand(0))));
   4944     return 0;
   4945   case Intrinsic::pcmarker: {
   4946     SDValue Tmp = getValue(I.getArgOperand(0));
   4947     DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
   4948     return 0;
   4949   }
   4950   case Intrinsic::readcyclecounter: {
   4951     SDValue Op = getRoot();
   4952     Res = DAG.getNode(ISD::READCYCLECOUNTER, dl,
   4953                       DAG.getVTList(MVT::i64, MVT::Other),
   4954                       &Op, 1);
   4955     setValue(&I, Res);
   4956     DAG.setRoot(Res.getValue(1));
   4957     return 0;
   4958   }
   4959   case Intrinsic::bswap:
   4960     setValue(&I, DAG.getNode(ISD::BSWAP, dl,
   4961                              getValue(I.getArgOperand(0)).getValueType(),
   4962                              getValue(I.getArgOperand(0))));
   4963     return 0;
   4964   case Intrinsic::cttz: {
   4965     SDValue Arg = getValue(I.getArgOperand(0));
   4966     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
   4967     EVT Ty = Arg.getValueType();
   4968     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
   4969                              dl, Ty, Arg));
   4970     return 0;
   4971   }
   4972   case Intrinsic::ctlz: {
   4973     SDValue Arg = getValue(I.getArgOperand(0));
   4974     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
   4975     EVT Ty = Arg.getValueType();
   4976     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
   4977                              dl, Ty, Arg));
   4978     return 0;
   4979   }
   4980   case Intrinsic::ctpop: {
   4981     SDValue Arg = getValue(I.getArgOperand(0));
   4982     EVT Ty = Arg.getValueType();
   4983     setValue(&I, DAG.getNode(ISD::CTPOP, dl, Ty, Arg));
   4984     return 0;
   4985   }
   4986   case Intrinsic::stacksave: {
   4987     SDValue Op = getRoot();
   4988     Res = DAG.getNode(ISD::STACKSAVE, dl,
   4989                       DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
   4990     setValue(&I, Res);
   4991     DAG.setRoot(Res.getValue(1));
   4992     return 0;
   4993   }
   4994   case Intrinsic::stackrestore: {
   4995     Res = getValue(I.getArgOperand(0));
   4996     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Res));
   4997     return 0;
   4998   }
   4999   case Intrinsic::stackprotector: {
   5000     // Emit code into the DAG to store the stack guard onto the stack.
   5001     MachineFunction &MF = DAG.getMachineFunction();
   5002     MachineFrameInfo *MFI = MF.getFrameInfo();
   5003     EVT PtrTy = TLI.getPointerTy();
   5004 
   5005     SDValue Src = getValue(I.getArgOperand(0));   // The guard's value.
   5006     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
   5007 
   5008     int FI = FuncInfo.StaticAllocaMap[Slot];
   5009     MFI->setStackProtectorIndex(FI);
   5010 
   5011     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
   5012 
   5013     // Store the stack protector onto the stack.
   5014     Res = DAG.getStore(getRoot(), dl, Src, FIN,
   5015                        MachinePointerInfo::getFixedStack(FI),
   5016                        true, false, 0);
   5017     setValue(&I, Res);
   5018     DAG.setRoot(Res);
   5019     return 0;
   5020   }
   5021   case Intrinsic::objectsize: {
   5022     // If we don't know by now, we're never going to know.
   5023     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
   5024 
   5025     assert(CI && "Non-constant type in __builtin_object_size?");
   5026 
   5027     SDValue Arg = getValue(I.getCalledValue());
   5028     EVT Ty = Arg.getValueType();
   5029 
   5030     if (CI->isZero())
   5031       Res = DAG.getConstant(-1ULL, Ty);
   5032     else
   5033       Res = DAG.getConstant(0, Ty);
   5034 
   5035     setValue(&I, Res);
   5036     return 0;
   5037   }
   5038   case Intrinsic::var_annotation:
   5039     // Discard annotate attributes
   5040     return 0;
   5041 
   5042   case Intrinsic::init_trampoline: {
   5043     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
   5044 
   5045     SDValue Ops[6];
   5046     Ops[0] = getRoot();
   5047     Ops[1] = getValue(I.getArgOperand(0));
   5048     Ops[2] = getValue(I.getArgOperand(1));
   5049     Ops[3] = getValue(I.getArgOperand(2));
   5050     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
   5051     Ops[5] = DAG.getSrcValue(F);
   5052 
   5053     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, dl, MVT::Other, Ops, 6);
   5054 
   5055     DAG.setRoot(Res);
   5056     return 0;
   5057   }
   5058   case Intrinsic::adjust_trampoline: {
   5059     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, dl,
   5060                              TLI.getPointerTy(),
   5061                              getValue(I.getArgOperand(0))));
   5062     return 0;
   5063   }
   5064   case Intrinsic::gcroot:
   5065     if (GFI) {
   5066       const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
   5067       const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
   5068 
   5069       FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
   5070       GFI->addStackRoot(FI->getIndex(), TypeMap);
   5071     }
   5072     return 0;
   5073   case Intrinsic::gcread:
   5074   case Intrinsic::gcwrite:
   5075     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
   5076   case Intrinsic::flt_rounds:
   5077     setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
   5078     return 0;
   5079 
   5080   case Intrinsic::expect: {
   5081     // Just replace __builtin_expect(exp, c) with EXP.
   5082     setValue(&I, getValue(I.getArgOperand(0)));
   5083     return 0;
   5084   }
   5085 
   5086   case Intrinsic::debugtrap:
   5087   case Intrinsic::trap: {
   5088     StringRef TrapFuncName = TM.Options.getTrapFunctionName();
   5089     if (TrapFuncName.empty()) {
   5090       ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
   5091         ISD::TRAP : ISD::DEBUGTRAP;
   5092       DAG.setRoot(DAG.getNode(Op, dl,MVT::Other, getRoot()));
   5093       return 0;
   5094     }
   5095     TargetLowering::ArgListTy Args;
   5096     TargetLowering::
   5097     CallLoweringInfo CLI(getRoot(), I.getType(),
   5098                  false, false, false, false, 0, CallingConv::C,
   5099                  /*isTailCall=*/false,
   5100                  /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
   5101                  DAG.getExternalSymbol(TrapFuncName.data(), TLI.getPointerTy()),
   5102                  Args, DAG, dl);
   5103     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
   5104     DAG.setRoot(Result.second);
   5105     return 0;
   5106   }
   5107 
   5108   case Intrinsic::uadd_with_overflow:
   5109   case Intrinsic::sadd_with_overflow:
   5110   case Intrinsic::usub_with_overflow:
   5111   case Intrinsic::ssub_with_overflow:
   5112   case Intrinsic::umul_with_overflow:
   5113   case Intrinsic::smul_with_overflow: {
   5114     ISD::NodeType Op;
   5115     switch (Intrinsic) {
   5116     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
   5117     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
   5118     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
   5119     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
   5120     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
   5121     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
   5122     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
   5123     }
   5124     SDValue Op1 = getValue(I.getArgOperand(0));
   5125     SDValue Op2 = getValue(I.getArgOperand(1));
   5126 
   5127     SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
   5128     setValue(&I, DAG.getNode(Op, dl, VTs, Op1, Op2));
   5129     return 0;
   5130   }
   5131   case Intrinsic::prefetch: {
   5132     SDValue Ops[5];
   5133     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
   5134     Ops[0] = getRoot();
   5135     Ops[1] = getValue(I.getArgOperand(0));
   5136     Ops[2] = getValue(I.getArgOperand(1));
   5137     Ops[3] = getValue(I.getArgOperand(2));
   5138     Ops[4] = getValue(I.getArgOperand(3));
   5139     DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, dl,
   5140                                         DAG.getVTList(MVT::Other),
   5141                                         &Ops[0], 5,
   5142                                         EVT::getIntegerVT(*Context, 8),
   5143                                         MachinePointerInfo(I.getArgOperand(0)),
   5144                                         0, /* align */
   5145                                         false, /* volatile */
   5146                                         rw==0, /* read */
   5147                                         rw==1)); /* write */
   5148     return 0;
   5149   }
   5150   case Intrinsic::lifetime_start:
   5151   case Intrinsic::lifetime_end: {
   5152     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
   5153     // Stack coloring is not enabled in O0, discard region information.
   5154     if (TM.getOptLevel() == CodeGenOpt::None)
   5155       return 0;
   5156 
   5157     SmallVector<Value *, 4> Allocas;
   5158     GetUnderlyingObjects(I.getArgOperand(1), Allocas, TD);
   5159 
   5160     for (SmallVector<Value*, 4>::iterator Object = Allocas.begin(),
   5161          E = Allocas.end(); Object != E; ++Object) {
   5162       AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
   5163 
   5164       // Could not find an Alloca.
   5165       if (!LifetimeObject)
   5166         continue;
   5167 
   5168       int FI = FuncInfo.StaticAllocaMap[LifetimeObject];
   5169 
   5170       SDValue Ops[2];
   5171       Ops[0] = getRoot();
   5172       Ops[1] = DAG.getFrameIndex(FI, TLI.getPointerTy(), true);
   5173       unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
   5174 
   5175       Res = DAG.getNode(Opcode, dl, MVT::Other, Ops, 2);
   5176       DAG.setRoot(Res);
   5177     }
   5178     return 0;
   5179   }
   5180   case Intrinsic::invariant_start:
   5181     // Discard region information.
   5182     setValue(&I, DAG.getUNDEF(TLI.getPointerTy()));
   5183     return 0;
   5184   case Intrinsic::invariant_end:
   5185     // Discard region information.
   5186     return 0;
   5187   case Intrinsic::donothing:
   5188     // ignore
   5189     return 0;
   5190   }
   5191 }
   5192 
   5193 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
   5194                                       bool isTailCall,
   5195                                       MachineBasicBlock *LandingPad) {
   5196   PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
   5197   FunctionType *FTy = cast<FunctionType>(PT->getElementType());
   5198   Type *RetTy = FTy->getReturnType();
   5199   MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
   5200   MCSymbol *BeginLabel = 0;
   5201 
   5202   TargetLowering::ArgListTy Args;
   5203   TargetLowering::ArgListEntry Entry;
   5204   Args.reserve(CS.arg_size());
   5205 
   5206   // Check whether the function can return without sret-demotion.
   5207   SmallVector<ISD::OutputArg, 4> Outs;
   5208   GetReturnInfo(RetTy, CS.getAttributes(), Outs, TLI);
   5209 
   5210   bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
   5211                                            DAG.getMachineFunction(),
   5212                                            FTy->isVarArg(), Outs,
   5213                                            FTy->getContext());
   5214 
   5215   SDValue DemoteStackSlot;
   5216   int DemoteStackIdx = -100;
   5217 
   5218   if (!CanLowerReturn) {
   5219     uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(
   5220                       FTy->getReturnType());
   5221     unsigned Align  = TLI.getDataLayout()->getPrefTypeAlignment(
   5222                       FTy->getReturnType());
   5223     MachineFunction &MF = DAG.getMachineFunction();
   5224     DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
   5225     Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
   5226 
   5227     DemoteStackSlot = DAG.getFrameIndex(DemoteStackIdx, TLI.getPointerTy());
   5228     Entry.Node = DemoteStackSlot;
   5229     Entry.Ty = StackSlotPtrType;
   5230     Entry.isSExt = false;
   5231     Entry.isZExt = false;
   5232     Entry.isInReg = false;
   5233     Entry.isSRet = true;
   5234     Entry.isNest = false;
   5235     Entry.isByVal = false;
   5236     Entry.Alignment = Align;
   5237     Args.push_back(Entry);
   5238     RetTy = Type::getVoidTy(FTy->getContext());
   5239   }
   5240 
   5241   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
   5242        i != e; ++i) {
   5243     const Value *V = *i;
   5244 
   5245     // Skip empty types
   5246     if (V->getType()->isEmptyTy())
   5247       continue;
   5248 
   5249     SDValue ArgNode = getValue(V);
   5250     Entry.Node = ArgNode; Entry.Ty = V->getType();
   5251 
   5252     unsigned attrInd = i - CS.arg_begin() + 1;
   5253     Entry.isSExt  = CS.paramHasAttr(attrInd, Attribute::SExt);
   5254     Entry.isZExt  = CS.paramHasAttr(attrInd, Attribute::ZExt);
   5255     Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
   5256     Entry.isSRet  = CS.paramHasAttr(attrInd, Attribute::StructRet);
   5257     Entry.isNest  = CS.paramHasAttr(attrInd, Attribute::Nest);
   5258     Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
   5259     Entry.Alignment = CS.getParamAlignment(attrInd);
   5260     Args.push_back(Entry);
   5261   }
   5262 
   5263   if (LandingPad) {
   5264     // Insert a label before the invoke call to mark the try range.  This can be
   5265     // used to detect deletion of the invoke via the MachineModuleInfo.
   5266     BeginLabel = MMI.getContext().CreateTempSymbol();
   5267 
   5268     // For SjLj, keep track of which landing pads go with which invokes
   5269     // so as to maintain the ordering of pads in the LSDA.
   5270     unsigned CallSiteIndex = MMI.getCurrentCallSite();
   5271     if (CallSiteIndex) {
   5272       MMI.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
   5273       LPadToCallSiteMap[LandingPad].push_back(CallSiteIndex);
   5274 
   5275       // Now that the call site is handled, stop tracking it.
   5276       MMI.setCurrentCallSite(0);
   5277     }
   5278 
   5279     // Both PendingLoads and PendingExports must be flushed here;
   5280     // this call might not return.
   5281     (void)getRoot();
   5282     DAG.setRoot(DAG.getEHLabel(getCurDebugLoc(), getControlRoot(), BeginLabel));
   5283   }
   5284 
   5285   // Check if target-independent constraints permit a tail call here.
   5286   // Target-dependent constraints are checked within TLI.LowerCallTo.
   5287   if (isTailCall && !isInTailCallPosition(CS, TLI))
   5288     isTailCall = false;
   5289 
   5290   TargetLowering::
   5291   CallLoweringInfo CLI(getRoot(), RetTy, FTy, isTailCall, Callee, Args, DAG,
   5292                        getCurDebugLoc(), CS);
   5293   std::pair<SDValue,SDValue> Result = TLI.LowerCallTo(CLI);
   5294   assert((isTailCall || Result.second.getNode()) &&
   5295          "Non-null chain expected with non-tail call!");
   5296   assert((Result.second.getNode() || !Result.first.getNode()) &&
   5297          "Null value expected with tail call!");
   5298   if (Result.first.getNode()) {
   5299     setValue(CS.getInstruction(), Result.first);
   5300   } else if (!CanLowerReturn && Result.second.getNode()) {
   5301     // The instruction result is the result of loading from the
   5302     // hidden sret parameter.
   5303     SmallVector<EVT, 1> PVTs;
   5304     Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
   5305 
   5306     ComputeValueVTs(TLI, PtrRetTy, PVTs);
   5307     assert(PVTs.size() == 1 && "Pointers should fit in one register");
   5308     EVT PtrVT = PVTs[0];
   5309 
   5310     SmallVector<EVT, 4> RetTys;
   5311     SmallVector<uint64_t, 4> Offsets;
   5312     RetTy = FTy->getReturnType();
   5313     ComputeValueVTs(TLI, RetTy, RetTys, &Offsets);
   5314 
   5315     unsigned NumValues = RetTys.size();
   5316     SmallVector<SDValue, 4> Values(NumValues);
   5317     SmallVector<SDValue, 4> Chains(NumValues);
   5318 
   5319     for (unsigned i = 0; i < NumValues; ++i) {
   5320       SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT,
   5321                                 DemoteStackSlot,
   5322                                 DAG.getConstant(Offsets[i], PtrVT));
   5323       SDValue L = DAG.getLoad(RetTys[i], getCurDebugLoc(), Result.second, Add,
   5324                   MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]),
   5325                               false, false, false, 1);
   5326       Values[i] = L;
   5327       Chains[i] = L.getValue(1);
   5328     }
   5329 
   5330     SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
   5331                                 MVT::Other, &Chains[0], NumValues);
   5332     PendingLoads.push_back(Chain);
   5333 
   5334     setValue(CS.getInstruction(),
   5335              DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
   5336                          DAG.getVTList(&RetTys[0], RetTys.size()),
   5337                          &Values[0], Values.size()));
   5338   }
   5339 
   5340   // Assign order to nodes here. If the call does not produce a result, it won't
   5341   // be mapped to a SDNode and visit() will not assign it an order number.
   5342   if (!Result.second.getNode()) {
   5343     // As a special case, a null chain means that a tail call has been emitted and
   5344     // the DAG root is already updated.
   5345     HasTailCall = true;
   5346     ++SDNodeOrder;
   5347     AssignOrderingToNode(DAG.getRoot().getNode());
   5348   } else {
   5349     DAG.setRoot(Result.second);
   5350     ++SDNodeOrder;
   5351     AssignOrderingToNode(Result.second.getNode());
   5352   }
   5353 
   5354   if (LandingPad) {
   5355     // Insert a label at the end of the invoke call to mark the try range.  This
   5356     // can be used to detect deletion of the invoke via the MachineModuleInfo.
   5357     MCSymbol *EndLabel = MMI.getContext().CreateTempSymbol();
   5358     DAG.setRoot(DAG.getEHLabel(getCurDebugLoc(), getRoot(), EndLabel));
   5359 
   5360     // Inform MachineModuleInfo of range.
   5361     MMI.addInvoke(LandingPad, BeginLabel, EndLabel);
   5362   }
   5363 }
   5364 
   5365 /// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
   5366 /// value is equal or not-equal to zero.
   5367 static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
   5368   for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
   5369        UI != E; ++UI) {
   5370     if (const ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
   5371       if (IC->isEquality())
   5372         if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
   5373           if (C->isNullValue())
   5374             continue;
   5375     // Unknown instruction.
   5376     return false;
   5377   }
   5378   return true;
   5379 }
   5380 
   5381 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
   5382                              Type *LoadTy,
   5383                              SelectionDAGBuilder &Builder) {
   5384 
   5385   // Check to see if this load can be trivially constant folded, e.g. if the
   5386   // input is from a string literal.
   5387   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
   5388     // Cast pointer to the type we really want to load.
   5389     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
   5390                                          PointerType::getUnqual(LoadTy));
   5391 
   5392     if (const Constant *LoadCst =
   5393           ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
   5394                                        Builder.TD))
   5395       return Builder.getValue(LoadCst);
   5396   }
   5397 
   5398   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
   5399   // still constant memory, the input chain can be the entry node.
   5400   SDValue Root;
   5401   bool ConstantMemory = false;
   5402 
   5403   // Do not serialize (non-volatile) loads of constant memory with anything.
   5404   if (Builder.AA->pointsToConstantMemory(PtrVal)) {
   5405     Root = Builder.DAG.getEntryNode();
   5406     ConstantMemory = true;
   5407   } else {
   5408     // Do not serialize non-volatile loads against each other.
   5409     Root = Builder.DAG.getRoot();
   5410   }
   5411 
   5412   SDValue Ptr = Builder.getValue(PtrVal);
   5413   SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurDebugLoc(), Root,
   5414                                         Ptr, MachinePointerInfo(PtrVal),
   5415                                         false /*volatile*/,
   5416                                         false /*nontemporal*/,
   5417                                         false /*isinvariant*/, 1 /* align=1 */);
   5418 
   5419   if (!ConstantMemory)
   5420     Builder.PendingLoads.push_back(LoadVal.getValue(1));
   5421   return LoadVal;
   5422 }
   5423 
   5424 
   5425 /// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form.
   5426 /// If so, return true and lower it, otherwise return false and it will be
   5427 /// lowered like a normal call.
   5428 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
   5429   // Verify that the prototype makes sense.  int memcmp(void*,void*,size_t)
   5430   if (I.getNumArgOperands() != 3)
   5431     return false;
   5432 
   5433   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
   5434   if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() ||
   5435       !I.getArgOperand(2)->getType()->isIntegerTy() ||
   5436       !I.getType()->isIntegerTy())
   5437     return false;
   5438 
   5439   const ConstantInt *Size = dyn_cast<ConstantInt>(I.getArgOperand(2));
   5440 
   5441   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
   5442   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
   5443   if (Size && IsOnlyUsedInZeroEqualityComparison(&I)) {
   5444     bool ActuallyDoIt = true;
   5445     MVT LoadVT;
   5446     Type *LoadTy;
   5447     switch (Size->getZExtValue()) {
   5448     default:
   5449       LoadVT = MVT::Other;
   5450       LoadTy = 0;
   5451       ActuallyDoIt = false;
   5452       break;
   5453     case 2:
   5454       LoadVT = MVT::i16;
   5455       LoadTy = Type::getInt16Ty(Size->getContext());
   5456       break;
   5457     case 4:
   5458       LoadVT = MVT::i32;
   5459       LoadTy = Type::getInt32Ty(Size->getContext());
   5460       break;
   5461     case 8:
   5462       LoadVT = MVT::i64;
   5463       LoadTy = Type::getInt64Ty(Size->getContext());
   5464       break;
   5465         /*
   5466     case 16:
   5467       LoadVT = MVT::v4i32;
   5468       LoadTy = Type::getInt32Ty(Size->getContext());
   5469       LoadTy = VectorType::get(LoadTy, 4);
   5470       break;
   5471          */
   5472     }
   5473 
   5474     // This turns into unaligned loads.  We only do this if the target natively
   5475     // supports the MVT we'll be loading or if it is small enough (<= 4) that
   5476     // we'll only produce a small number of byte loads.
   5477 
   5478     // Require that we can find a legal MVT, and only do this if the target
   5479     // supports unaligned loads of that type.  Expanding into byte loads would
   5480     // bloat the code.
   5481     if (ActuallyDoIt && Size->getZExtValue() > 4) {
   5482       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
   5483       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
   5484       if (!TLI.isTypeLegal(LoadVT) ||!TLI.allowsUnalignedMemoryAccesses(LoadVT))
   5485         ActuallyDoIt = false;
   5486     }
   5487 
   5488     if (ActuallyDoIt) {
   5489       SDValue LHSVal = getMemCmpLoad(LHS, LoadVT, LoadTy, *this);
   5490       SDValue RHSVal = getMemCmpLoad(RHS, LoadVT, LoadTy, *this);
   5491 
   5492       SDValue Res = DAG.getSetCC(getCurDebugLoc(), MVT::i1, LHSVal, RHSVal,
   5493                                  ISD::SETNE);
   5494       EVT CallVT = TLI.getValueType(I.getType(), true);
   5495       setValue(&I, DAG.getZExtOrTrunc(Res, getCurDebugLoc(), CallVT));
   5496       return true;
   5497     }
   5498   }
   5499 
   5500 
   5501   return false;
   5502 }
   5503 
   5504 /// visitUnaryFloatCall - If a call instruction is a unary floating-point
   5505 /// operation (as expected), translate it to an SDNode with the specified opcode
   5506 /// and return true.
   5507 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
   5508                                               unsigned Opcode) {
   5509   // Sanity check that it really is a unary floating-point call.
   5510   if (I.getNumArgOperands() != 1 ||
   5511       !I.getArgOperand(0)->getType()->isFloatingPointTy() ||
   5512       I.getType() != I.getArgOperand(0)->getType() ||
   5513       !I.onlyReadsMemory())
   5514     return false;
   5515 
   5516   SDValue Tmp = getValue(I.getArgOperand(0));
   5517   setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(), Tmp.getValueType(), Tmp));
   5518   return true;
   5519 }
   5520 
   5521 void SelectionDAGBuilder::visitCall(const CallInst &I) {
   5522   // Handle inline assembly differently.
   5523   if (isa<InlineAsm>(I.getCalledValue())) {
   5524     visitInlineAsm(&I);
   5525     return;
   5526   }
   5527 
   5528   MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
   5529   ComputeUsesVAFloatArgument(I, &MMI);
   5530 
   5531   const char *RenameFn = 0;
   5532   if (Function *F = I.getCalledFunction()) {
   5533     if (F->isDeclaration()) {
   5534       if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
   5535         if (unsigned IID = II->getIntrinsicID(F)) {
   5536           RenameFn = visitIntrinsicCall(I, IID);
   5537           if (!RenameFn)
   5538             return;
   5539         }
   5540       }
   5541       if (unsigned IID = F->getIntrinsicID()) {
   5542         RenameFn = visitIntrinsicCall(I, IID);
   5543         if (!RenameFn)
   5544           return;
   5545       }
   5546     }
   5547 
   5548     // Check for well-known libc/libm calls.  If the function is internal, it
   5549     // can't be a library call.
   5550     LibFunc::Func Func;
   5551     if (!F->hasLocalLinkage() && F->hasName() &&
   5552         LibInfo->getLibFunc(F->getName(), Func) &&
   5553         LibInfo->hasOptimizedCodeGen(Func)) {
   5554       switch (Func) {
   5555       default: break;
   5556       case LibFunc::copysign:
   5557       case LibFunc::copysignf:
   5558       case LibFunc::copysignl:
   5559         if (I.getNumArgOperands() == 2 &&   // Basic sanity checks.
   5560             I.getArgOperand(0)->getType()->isFloatingPointTy() &&
   5561             I.getType() == I.getArgOperand(0)->getType() &&
   5562             I.getType() == I.getArgOperand(1)->getType() &&
   5563             I.onlyReadsMemory()) {
   5564           SDValue LHS = getValue(I.getArgOperand(0));
   5565           SDValue RHS = getValue(I.getArgOperand(1));
   5566           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
   5567                                    LHS.getValueType(), LHS, RHS));
   5568           return;
   5569         }
   5570         break;
   5571       case LibFunc::fabs:
   5572       case LibFunc::fabsf:
   5573       case LibFunc::fabsl:
   5574         if (visitUnaryFloatCall(I, ISD::FABS))
   5575           return;
   5576         break;
   5577       case LibFunc::sin:
   5578       case LibFunc::sinf:
   5579       case LibFunc::sinl:
   5580         if (visitUnaryFloatCall(I, ISD::FSIN))
   5581           return;
   5582         break;
   5583       case LibFunc::cos:
   5584       case LibFunc::cosf:
   5585       case LibFunc::cosl:
   5586         if (visitUnaryFloatCall(I, ISD::FCOS))
   5587           return;
   5588         break;
   5589       case LibFunc::sqrt:
   5590       case LibFunc::sqrtf:
   5591       case LibFunc::sqrtl:
   5592         if (visitUnaryFloatCall(I, ISD::FSQRT))
   5593           return;
   5594         break;
   5595       case LibFunc::floor:
   5596       case LibFunc::floorf:
   5597       case LibFunc::floorl:
   5598         if (visitUnaryFloatCall(I, ISD::FFLOOR))
   5599           return;
   5600         break;
   5601       case LibFunc::nearbyint:
   5602       case LibFunc::nearbyintf:
   5603       case LibFunc::nearbyintl:
   5604         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
   5605           return;
   5606         break;
   5607       case LibFunc::ceil:
   5608       case LibFunc::ceilf:
   5609       case LibFunc::ceill:
   5610         if (visitUnaryFloatCall(I, ISD::FCEIL))
   5611           return;
   5612         break;
   5613       case LibFunc::rint:
   5614       case LibFunc::rintf:
   5615       case LibFunc::rintl:
   5616         if (visitUnaryFloatCall(I, ISD::FRINT))
   5617           return;
   5618         break;
   5619       case LibFunc::trunc:
   5620       case LibFunc::truncf:
   5621       case LibFunc::truncl:
   5622         if (visitUnaryFloatCall(I, ISD::FTRUNC))
   5623           return;
   5624         break;
   5625       case LibFunc::log2:
   5626       case LibFunc::log2f:
   5627       case LibFunc::log2l:
   5628         if (visitUnaryFloatCall(I, ISD::FLOG2))
   5629           return;
   5630         break;
   5631       case LibFunc::exp2:
   5632       case LibFunc::exp2f:
   5633       case LibFunc::exp2l:
   5634         if (visitUnaryFloatCall(I, ISD::FEXP2))
   5635           return;
   5636         break;
   5637       case LibFunc::memcmp:
   5638         if (visitMemCmpCall(I))
   5639           return;
   5640         break;
   5641       }
   5642     }
   5643   }
   5644 
   5645   SDValue Callee;
   5646   if (!RenameFn)
   5647     Callee = getValue(I.getCalledValue());
   5648   else
   5649     Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
   5650 
   5651   // Check if we can potentially perform a tail call. More detailed checking is
   5652   // be done within LowerCallTo, after more information about the call is known.
   5653   LowerCallTo(&I, Callee, I.isTailCall());
   5654 }
   5655 
   5656 namespace {
   5657 
   5658 /// AsmOperandInfo - This contains information for each constraint that we are
   5659 /// lowering.
   5660 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
   5661 public:
   5662   /// CallOperand - If this is the result output operand or a clobber
   5663   /// this is null, otherwise it is the incoming operand to the CallInst.
   5664   /// This gets modified as the asm is processed.
   5665   SDValue CallOperand;
   5666 
   5667   /// AssignedRegs - If this is a register or register class operand, this
   5668   /// contains the set of register corresponding to the operand.
   5669   RegsForValue AssignedRegs;
   5670 
   5671   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
   5672     : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
   5673   }
   5674 
   5675   /// getCallOperandValEVT - Return the EVT of the Value* that this operand
   5676   /// corresponds to.  If there is no Value* for this operand, it returns
   5677   /// MVT::Other.
   5678   EVT getCallOperandValEVT(LLVMContext &Context,
   5679                            const TargetLowering &TLI,
   5680                            const DataLayout *TD) const {
   5681     if (CallOperandVal == 0) return MVT::Other;
   5682 
   5683     if (isa<BasicBlock>(CallOperandVal))
   5684       return TLI.getPointerTy();
   5685 
   5686     llvm::Type *OpTy = CallOperandVal->getType();
   5687 
   5688     // FIXME: code duplicated from TargetLowering::ParseConstraints().
   5689     // If this is an indirect operand, the operand is a pointer to the
   5690     // accessed type.
   5691     if (isIndirect) {
   5692       llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
   5693       if (!PtrTy)
   5694         report_fatal_error("Indirect operand for inline asm not a pointer!");
   5695       OpTy = PtrTy->getElementType();
   5696     }
   5697 
   5698     // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
   5699     if (StructType *STy = dyn_cast<StructType>(OpTy))
   5700       if (STy->getNumElements() == 1)
   5701         OpTy = STy->getElementType(0);
   5702 
   5703     // If OpTy is not a single value, it may be a struct/union that we
   5704     // can tile with integers.
   5705     if (!OpTy->isSingleValueType() && OpTy->isSized()) {
   5706       unsigned BitSize = TD->getTypeSizeInBits(OpTy);
   5707       switch (BitSize) {
   5708       default: break;
   5709       case 1:
   5710       case 8:
   5711       case 16:
   5712       case 32:
   5713       case 64:
   5714       case 128:
   5715         OpTy = IntegerType::get(Context, BitSize);
   5716         break;
   5717       }
   5718     }
   5719 
   5720     return TLI.getValueType(OpTy, true);
   5721   }
   5722 };
   5723 
   5724 typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
   5725 
   5726 } // end anonymous namespace
   5727 
   5728 /// GetRegistersForValue - Assign registers (virtual or physical) for the
   5729 /// specified operand.  We prefer to assign virtual registers, to allow the
   5730 /// register allocator to handle the assignment process.  However, if the asm
   5731 /// uses features that we can't model on machineinstrs, we have SDISel do the
   5732 /// allocation.  This produces generally horrible, but correct, code.
   5733 ///
   5734 ///   OpInfo describes the operand.
   5735 ///
   5736 static void GetRegistersForValue(SelectionDAG &DAG,
   5737                                  const TargetLowering &TLI,
   5738                                  DebugLoc DL,
   5739                                  SDISelAsmOperandInfo &OpInfo) {
   5740   LLVMContext &Context = *DAG.getContext();
   5741 
   5742   MachineFunction &MF = DAG.getMachineFunction();
   5743   SmallVector<unsigned, 4> Regs;
   5744 
   5745   // If this is a constraint for a single physreg, or a constraint for a
   5746   // register class, find it.
   5747   std::pair<unsigned, const TargetRegisterClass*> PhysReg =
   5748     TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
   5749                                      OpInfo.ConstraintVT);
   5750 
   5751   unsigned NumRegs = 1;
   5752   if (OpInfo.ConstraintVT != MVT::Other) {
   5753     // If this is a FP input in an integer register (or visa versa) insert a bit
   5754     // cast of the input value.  More generally, handle any case where the input
   5755     // value disagrees with the register class we plan to stick this in.
   5756     if (OpInfo.Type == InlineAsm::isInput &&
   5757         PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
   5758       // Try to convert to the first EVT that the reg class contains.  If the
   5759       // types are identical size, use a bitcast to convert (e.g. two differing
   5760       // vector types).
   5761       MVT RegVT = *PhysReg.second->vt_begin();
   5762       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
   5763         OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
   5764                                          RegVT, OpInfo.CallOperand);
   5765         OpInfo.ConstraintVT = RegVT;
   5766       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
   5767         // If the input is a FP value and we want it in FP registers, do a
   5768         // bitcast to the corresponding integer type.  This turns an f64 value
   5769         // into i64, which can be passed with two i32 values on a 32-bit
   5770         // machine.
   5771         RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
   5772         OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
   5773                                          RegVT, OpInfo.CallOperand);
   5774         OpInfo.ConstraintVT = RegVT;
   5775       }
   5776     }
   5777 
   5778     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
   5779   }
   5780 
   5781   MVT RegVT;
   5782   EVT ValueVT = OpInfo.ConstraintVT;
   5783 
   5784   // If this is a constraint for a specific physical register, like {r17},
   5785   // assign it now.
   5786   if (unsigned AssignedReg = PhysReg.first) {
   5787     const TargetRegisterClass *RC = PhysReg.second;
   5788     if (OpInfo.ConstraintVT == MVT::Other)
   5789       ValueVT = *RC->vt_begin();
   5790 
   5791     // Get the actual register value type.  This is important, because the user
   5792     // may have asked for (e.g.) the AX register in i32 type.  We need to
   5793     // remember that AX is actually i16 to get the right extension.
   5794     RegVT = *RC->vt_begin();
   5795 
   5796     // This is a explicit reference to a physical register.
   5797     Regs.push_back(AssignedReg);
   5798 
   5799     // If this is an expanded reference, add the rest of the regs to Regs.
   5800     if (NumRegs != 1) {
   5801       TargetRegisterClass::iterator I = RC->begin();
   5802       for (; *I != AssignedReg; ++I)
   5803         assert(I != RC->end() && "Didn't find reg!");
   5804 
   5805       // Already added the first reg.
   5806       --NumRegs; ++I;
   5807       for (; NumRegs; --NumRegs, ++I) {
   5808         assert(I != RC->end() && "Ran out of registers to allocate!");
   5809         Regs.push_back(*I);
   5810       }
   5811     }
   5812 
   5813     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
   5814     return;
   5815   }
   5816 
   5817   // Otherwise, if this was a reference to an LLVM register class, create vregs
   5818   // for this reference.
   5819   if (const TargetRegisterClass *RC = PhysReg.second) {
   5820     RegVT = *RC->vt_begin();
   5821     if (OpInfo.ConstraintVT == MVT::Other)
   5822       ValueVT = RegVT;
   5823 
   5824     // Create the appropriate number of virtual registers.
   5825     MachineRegisterInfo &RegInfo = MF.getRegInfo();
   5826     for (; NumRegs; --NumRegs)
   5827       Regs.push_back(RegInfo.createVirtualRegister(RC));
   5828 
   5829     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
   5830     return;
   5831   }
   5832 
   5833   // Otherwise, we couldn't allocate enough registers for this.
   5834 }
   5835 
   5836 /// visitInlineAsm - Handle a call to an InlineAsm object.
   5837 ///
   5838 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
   5839   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
   5840 
   5841   /// ConstraintOperands - Information about all of the constraints.
   5842   SDISelAsmOperandInfoVector ConstraintOperands;
   5843 
   5844   TargetLowering::AsmOperandInfoVector
   5845     TargetConstraints = TLI.ParseConstraints(CS);
   5846 
   5847   bool hasMemory = false;
   5848 
   5849   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
   5850   unsigned ResNo = 0;   // ResNo - The result number of the next output.
   5851   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
   5852     ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
   5853     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
   5854 
   5855     MVT OpVT = MVT::Other;
   5856 
   5857     // Compute the value type for each operand.
   5858     switch (OpInfo.Type) {
   5859     case InlineAsm::isOutput:
   5860       // Indirect outputs just consume an argument.
   5861       if (OpInfo.isIndirect) {
   5862         OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
   5863         break;
   5864       }
   5865 
   5866       // The return value of the call is this value.  As such, there is no
   5867       // corresponding argument.
   5868       assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
   5869       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
   5870         OpVT = TLI.getSimpleValueType(STy->getElementType(ResNo));
   5871       } else {
   5872         assert(ResNo == 0 && "Asm only has one result!");
   5873         OpVT = TLI.getSimpleValueType(CS.getType());
   5874       }
   5875       ++ResNo;
   5876       break;
   5877     case InlineAsm::isInput:
   5878       OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
   5879       break;
   5880     case InlineAsm::isClobber:
   5881       // Nothing to do.
   5882       break;
   5883     }
   5884 
   5885     // If this is an input or an indirect output, process the call argument.
   5886     // BasicBlocks are labels, currently appearing only in asm's.
   5887     if (OpInfo.CallOperandVal) {
   5888       if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
   5889         OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
   5890       } else {
   5891         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
   5892       }
   5893 
   5894       OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, TD).
   5895         getSimpleVT();
   5896     }
   5897 
   5898     OpInfo.ConstraintVT = OpVT;
   5899 
   5900     // Indirect operand accesses access memory.
   5901     if (OpInfo.isIndirect)
   5902       hasMemory = true;
   5903     else {
   5904       for (unsigned j = 0, ee = OpInfo.Codes.size(); j != ee; ++j) {
   5905         TargetLowering::ConstraintType
   5906           CType = TLI.getConstraintType(OpInfo.Codes[j]);
   5907         if (CType == TargetLowering::C_Memory) {
   5908           hasMemory = true;
   5909           break;
   5910         }
   5911       }
   5912     }
   5913   }
   5914 
   5915   SDValue Chain, Flag;
   5916 
   5917   // We won't need to flush pending loads if this asm doesn't touch
   5918   // memory and is nonvolatile.
   5919   if (hasMemory || IA->hasSideEffects())
   5920     Chain = getRoot();
   5921   else
   5922     Chain = DAG.getRoot();
   5923 
   5924   // Second pass over the constraints: compute which constraint option to use
   5925   // and assign registers to constraints that want a specific physreg.
   5926   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
   5927     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
   5928 
   5929     // If this is an output operand with a matching input operand, look up the
   5930     // matching input. If their types mismatch, e.g. one is an integer, the
   5931     // other is floating point, or their sizes are different, flag it as an
   5932     // error.
   5933     if (OpInfo.hasMatchingInput()) {
   5934       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
   5935 
   5936       if (OpInfo.ConstraintVT != Input.ConstraintVT) {
   5937         std::pair<unsigned, const TargetRegisterClass*> MatchRC =
   5938           TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
   5939                                            OpInfo.ConstraintVT);
   5940         std::pair<unsigned, const TargetRegisterClass*> InputRC =
   5941           TLI.getRegForInlineAsmConstraint(Input.ConstraintCode,
   5942                                            Input.ConstraintVT);
   5943         if ((OpInfo.ConstraintVT.isInteger() !=
   5944              Input.ConstraintVT.isInteger()) ||
   5945             (MatchRC.second != InputRC.second)) {
   5946           report_fatal_error("Unsupported asm: input constraint"
   5947                              " with a matching output constraint of"
   5948                              " incompatible type!");
   5949         }
   5950         Input.ConstraintVT = OpInfo.ConstraintVT;
   5951       }
   5952     }
   5953 
   5954     // Compute the constraint code and ConstraintType to use.
   5955     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
   5956 
   5957     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
   5958         OpInfo.Type == InlineAsm::isClobber)
   5959       continue;
   5960 
   5961     // If this is a memory input, and if the operand is not indirect, do what we
   5962     // need to to provide an address for the memory input.
   5963     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
   5964         !OpInfo.isIndirect) {
   5965       assert((OpInfo.isMultipleAlternative ||
   5966               (OpInfo.Type == InlineAsm::isInput)) &&
   5967              "Can only indirectify direct input operands!");
   5968 
   5969       // Memory operands really want the address of the value.  If we don't have
   5970       // an indirect input, put it in the constpool if we can, otherwise spill
   5971       // it to a stack slot.
   5972       // TODO: This isn't quite right. We need to handle these according to
   5973       // the addressing mode that the constraint wants. Also, this may take
   5974       // an additional register for the computation and we don't want that
   5975       // either.
   5976 
   5977       // If the operand is a float, integer, or vector constant, spill to a
   5978       // constant pool entry to get its address.
   5979       const Value *OpVal = OpInfo.CallOperandVal;
   5980       if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
   5981           isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
   5982         OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
   5983                                                  TLI.getPointerTy());
   5984       } else {
   5985         // Otherwise, create a stack slot and emit a store to it before the
   5986         // asm.
   5987         Type *Ty = OpVal->getType();
   5988         uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
   5989         unsigned Align  = TLI.getDataLayout()->getPrefTypeAlignment(Ty);
   5990         MachineFunction &MF = DAG.getMachineFunction();
   5991         int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
   5992         SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
   5993         Chain = DAG.getStore(Chain, getCurDebugLoc(),
   5994                              OpInfo.CallOperand, StackSlot,
   5995                              MachinePointerInfo::getFixedStack(SSFI),
   5996                              false, false, 0);
   5997         OpInfo.CallOperand = StackSlot;
   5998       }
   5999 
   6000       // There is no longer a Value* corresponding to this operand.
   6001       OpInfo.CallOperandVal = 0;
   6002 
   6003       // It is now an indirect operand.
   6004       OpInfo.isIndirect = true;
   6005     }
   6006 
   6007     // If this constraint is for a specific register, allocate it before
   6008     // anything else.
   6009     if (OpInfo.ConstraintType == TargetLowering::C_Register)
   6010       GetRegistersForValue(DAG, TLI, getCurDebugLoc(), OpInfo);
   6011   }
   6012 
   6013   // Second pass - Loop over all of the operands, assigning virtual or physregs
   6014   // to register class operands.
   6015   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
   6016     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
   6017 
   6018     // C_Register operands have already been allocated, Other/Memory don't need
   6019     // to be.
   6020     if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
   6021       GetRegistersForValue(DAG, TLI, getCurDebugLoc(), OpInfo);
   6022   }
   6023 
   6024   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
   6025   std::vector<SDValue> AsmNodeOperands;
   6026   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
   6027   AsmNodeOperands.push_back(
   6028           DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
   6029                                       TLI.getPointerTy()));
   6030 
   6031   // If we have a !srcloc metadata node associated with it, we want to attach
   6032   // this to the ultimately generated inline asm machineinstr.  To do this, we
   6033   // pass in the third operand as this (potentially null) inline asm MDNode.
   6034   const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
   6035   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
   6036 
   6037   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
   6038   // bits as operand 3.
   6039   unsigned ExtraInfo = 0;
   6040   if (IA->hasSideEffects())
   6041     ExtraInfo |= InlineAsm::Extra_HasSideEffects;
   6042   if (IA->isAlignStack())
   6043     ExtraInfo |= InlineAsm::Extra_IsAlignStack;
   6044   // Set the asm dialect.
   6045   ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
   6046 
   6047   // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
   6048   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
   6049     TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
   6050 
   6051     // Compute the constraint code and ConstraintType to use.
   6052     TLI.ComputeConstraintToUse(OpInfo, SDValue());
   6053 
   6054     // Ideally, we would only check against memory constraints.  However, the
   6055     // meaning of an other constraint can be target-specific and we can't easily
   6056     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
   6057     // for other constriants as well.
   6058     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
   6059         OpInfo.ConstraintType == TargetLowering::C_Other) {
   6060       if (OpInfo.Type == InlineAsm::isInput)
   6061         ExtraInfo |= InlineAsm::Extra_MayLoad;
   6062       else if (OpInfo.Type == InlineAsm::isOutput)
   6063         ExtraInfo |= InlineAsm::Extra_MayStore;
   6064       else if (OpInfo.Type == InlineAsm::isClobber)
   6065         ExtraInfo |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
   6066     }
   6067   }
   6068 
   6069   AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo,
   6070                                                   TLI.getPointerTy()));
   6071 
   6072   // Loop over all of the inputs, copying the operand values into the
   6073   // appropriate registers and processing the output regs.
   6074   RegsForValue RetValRegs;
   6075 
   6076   // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
   6077   std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
   6078 
   6079   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
   6080     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
   6081 
   6082     switch (OpInfo.Type) {
   6083     case InlineAsm::isOutput: {
   6084       if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
   6085           OpInfo.ConstraintType != TargetLowering::C_Register) {
   6086         // Memory output, or 'other' output (e.g. 'X' constraint).
   6087         assert(OpInfo.isIndirect && "Memory output must be indirect operand");
   6088 
   6089         // Add information to the INLINEASM node to know about this output.
   6090         unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
   6091         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags,
   6092                                                         TLI.getPointerTy()));
   6093         AsmNodeOperands.push_back(OpInfo.CallOperand);
   6094         break;
   6095       }
   6096 
   6097       // Otherwise, this is a register or register class output.
   6098 
   6099       // Copy the output from the appropriate register.  Find a register that
   6100       // we can use.
   6101       if (OpInfo.AssignedRegs.Regs.empty()) {
   6102         LLVMContext &Ctx = *DAG.getContext();
   6103         Ctx.emitError(CS.getInstruction(),
   6104                       "couldn't allocate output register for constraint '" +
   6105                            Twine(OpInfo.ConstraintCode) + "'");
   6106         break;
   6107       }
   6108 
   6109       // If this is an indirect operand, store through the pointer after the
   6110       // asm.
   6111       if (OpInfo.isIndirect) {
   6112         IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
   6113                                                       OpInfo.CallOperandVal));
   6114       } else {
   6115         // This is the result value of the call.
   6116         assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
   6117         // Concatenate this output onto the outputs list.
   6118         RetValRegs.append(OpInfo.AssignedRegs);
   6119       }
   6120 
   6121       // Add information to the INLINEASM node to know that this register is
   6122       // set.
   6123       OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
   6124                                            InlineAsm::Kind_RegDefEarlyClobber :
   6125                                                InlineAsm::Kind_RegDef,
   6126                                                false,
   6127                                                0,
   6128                                                DAG,
   6129                                                AsmNodeOperands);
   6130       break;
   6131     }
   6132     case InlineAsm::isInput: {
   6133       SDValue InOperandVal = OpInfo.CallOperand;
   6134 
   6135       if (OpInfo.isMatchingInputConstraint()) {   // Matching constraint?
   6136         // If this is required to match an output register we have already set,
   6137         // just use its register.
   6138         unsigned OperandNo = OpInfo.getMatchedOperand();
   6139 
   6140         // Scan until we find the definition we already emitted of this operand.
   6141         // When we find it, create a RegsForValue operand.
   6142         unsigned CurOp = InlineAsm::Op_FirstOperand;
   6143         for (; OperandNo; --OperandNo) {
   6144           // Advance to the next operand.
   6145           unsigned OpFlag =
   6146             cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
   6147           assert((InlineAsm::isRegDefKind(OpFlag) ||
   6148                   InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
   6149                   InlineAsm::isMemKind(OpFlag)) && "Skipped past definitions?");
   6150           CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
   6151         }
   6152 
   6153         unsigned OpFlag =
   6154           cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
   6155         if (InlineAsm::isRegDefKind(OpFlag) ||
   6156             InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
   6157           // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
   6158           if (OpInfo.isIndirect) {
   6159             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
   6160             LLVMContext &Ctx = *DAG.getContext();
   6161             Ctx.emitError(CS.getInstruction(),  "inline asm not supported yet:"
   6162                           " don't know how to handle tied "
   6163                           "indirect register inputs");
   6164             report_fatal_error("Cannot handle indirect register inputs!");
   6165           }
   6166 
   6167           RegsForValue MatchedRegs;
   6168           MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
   6169           MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
   6170           MatchedRegs.RegVTs.push_back(RegVT);
   6171           MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
   6172           for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
   6173                i != e; ++i)
   6174             MatchedRegs.Regs.push_back
   6175               (RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
   6176 
   6177           // Use the produced MatchedRegs object to
   6178           MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
   6179                                     Chain, &Flag, CS.getInstruction());
   6180           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
   6181                                            true, OpInfo.getMatchedOperand(),
   6182                                            DAG, AsmNodeOperands);
   6183           break;
   6184         }
   6185 
   6186         assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
   6187         assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
   6188                "Unexpected number of operands");
   6189         // Add information to the INLINEASM node to know about this input.
   6190         // See InlineAsm.h isUseOperandTiedToDef.
   6191         OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
   6192                                                     OpInfo.getMatchedOperand());
   6193         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
   6194                                                         TLI.getPointerTy()));
   6195         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
   6196         break;
   6197       }
   6198 
   6199       // Treat indirect 'X' constraint as memory.
   6200       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
   6201           OpInfo.isIndirect)
   6202         OpInfo.ConstraintType = TargetLowering::C_Memory;
   6203 
   6204       if (OpInfo.ConstraintType == TargetLowering::C_Other) {
   6205         std::vector<SDValue> Ops;
   6206         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
   6207                                          Ops, DAG);
   6208         if (Ops.empty()) {
   6209           LLVMContext &Ctx = *DAG.getContext();
   6210           Ctx.emitError(CS.getInstruction(),
   6211                         "invalid operand for inline asm constraint '" +
   6212                         Twine(OpInfo.ConstraintCode) + "'");
   6213           break;
   6214         }
   6215 
   6216         // Add information to the INLINEASM node to know about this input.
   6217         unsigned ResOpType =
   6218           InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
   6219         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
   6220                                                         TLI.getPointerTy()));
   6221         AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
   6222         break;
   6223       }
   6224 
   6225       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
   6226         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
   6227         assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
   6228                "Memory operands expect pointer values");
   6229 
   6230         // Add information to the INLINEASM node to know about this input.
   6231         unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
   6232         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
   6233                                                         TLI.getPointerTy()));
   6234         AsmNodeOperands.push_back(InOperandVal);
   6235         break;
   6236       }
   6237 
   6238       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
   6239               OpInfo.ConstraintType == TargetLowering::C_Register) &&
   6240              "Unknown constraint type!");
   6241 
   6242       // TODO: Support this.
   6243       if (OpInfo.isIndirect) {
   6244         LLVMContext &Ctx = *DAG.getContext();
   6245         Ctx.emitError(CS.getInstruction(),
   6246                       "Don't know how to handle indirect register inputs yet "
   6247                       "for constraint '" + Twine(OpInfo.ConstraintCode) + "'");
   6248         break;
   6249       }
   6250 
   6251       // Copy the input into the appropriate registers.
   6252       if (OpInfo.AssignedRegs.Regs.empty()) {
   6253         LLVMContext &Ctx = *DAG.getContext();
   6254         Ctx.emitError(CS.getInstruction(),
   6255                       "couldn't allocate input reg for constraint '" +
   6256                            Twine(OpInfo.ConstraintCode) + "'");
   6257         break;
   6258       }
   6259 
   6260       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
   6261                                         Chain, &Flag, CS.getInstruction());
   6262 
   6263       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
   6264                                                DAG, AsmNodeOperands);
   6265       break;
   6266     }
   6267     case InlineAsm::isClobber: {
   6268       // Add the clobbered value to the operand list, so that the register
   6269       // allocator is aware that the physreg got clobbered.
   6270       if (!OpInfo.AssignedRegs.Regs.empty())
   6271         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
   6272                                                  false, 0, DAG,
   6273                                                  AsmNodeOperands);
   6274       break;
   6275     }
   6276     }
   6277   }
   6278 
   6279   // Finish up input operands.  Set the input chain and add the flag last.
   6280   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
   6281   if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
   6282 
   6283   Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
   6284                       DAG.getVTList(MVT::Other, MVT::Glue),
   6285                       &AsmNodeOperands[0], AsmNodeOperands.size());
   6286   Flag = Chain.getValue(1);
   6287 
   6288   // If this asm returns a register value, copy the result from that register
   6289   // and set it as the value of the call.
   6290   if (!RetValRegs.Regs.empty()) {
   6291     SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(),
   6292                                              Chain, &Flag, CS.getInstruction());
   6293 
   6294     // FIXME: Why don't we do this for inline asms with MRVs?
   6295     if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
   6296       EVT ResultType = TLI.getValueType(CS.getType());
   6297 
   6298       // If any of the results of the inline asm is a vector, it may have the
   6299       // wrong width/num elts.  This can happen for register classes that can
   6300       // contain multiple different value types.  The preg or vreg allocated may
   6301       // not have the same VT as was expected.  Convert it to the right type
   6302       // with bit_convert.
   6303       if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
   6304         Val = DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
   6305                           ResultType, Val);
   6306 
   6307       } else if (ResultType != Val.getValueType() &&
   6308                  ResultType.isInteger() && Val.getValueType().isInteger()) {
   6309         // If a result value was tied to an input value, the computed result may
   6310         // have a wider width than the expected result.  Extract the relevant
   6311         // portion.
   6312         Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
   6313       }
   6314 
   6315       assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
   6316     }
   6317 
   6318     setValue(CS.getInstruction(), Val);
   6319     // Don't need to use this as a chain in this case.
   6320     if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
   6321       return;
   6322   }
   6323 
   6324   std::vector<std::pair<SDValue, const Value *> > StoresToEmit;
   6325 
   6326   // Process indirect outputs, first output all of the flagged copies out of
   6327   // physregs.
   6328   for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
   6329     RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
   6330     const Value *Ptr = IndirectStoresToEmit[i].second;
   6331     SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(),
   6332                                              Chain, &Flag, IA);
   6333     StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
   6334   }
   6335 
   6336   // Emit the non-flagged stores from the physregs.
   6337   SmallVector<SDValue, 8> OutChains;
   6338   for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
   6339     SDValue Val = DAG.getStore(Chain, getCurDebugLoc(),
   6340                                StoresToEmit[i].first,
   6341                                getValue(StoresToEmit[i].second),
   6342                                MachinePointerInfo(StoresToEmit[i].second),
   6343                                false, false, 0);
   6344     OutChains.push_back(Val);
   6345   }
   6346 
   6347   if (!OutChains.empty())
   6348     Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
   6349                         &OutChains[0], OutChains.size());
   6350 
   6351   DAG.setRoot(Chain);
   6352 }
   6353 
   6354 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
   6355   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
   6356                           MVT::Other, getRoot(),
   6357                           getValue(I.getArgOperand(0)),
   6358                           DAG.getSrcValue(I.getArgOperand(0))));
   6359 }
   6360 
   6361 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
   6362   const DataLayout &TD = *TLI.getDataLayout();
   6363   SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
   6364                            getRoot(), getValue(I.getOperand(0)),
   6365                            DAG.getSrcValue(I.getOperand(0)),
   6366                            TD.getABITypeAlignment(I.getType()));
   6367   setValue(&I, V);
   6368   DAG.setRoot(V.getValue(1));
   6369 }
   6370 
   6371 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
   6372   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
   6373                           MVT::Other, getRoot(),
   6374                           getValue(I.getArgOperand(0)),
   6375                           DAG.getSrcValue(I.getArgOperand(0))));
   6376 }
   6377 
   6378 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
   6379   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
   6380                           MVT::Other, getRoot(),
   6381                           getValue(I.getArgOperand(0)),
   6382                           getValue(I.getArgOperand(1)),
   6383                           DAG.getSrcValue(I.getArgOperand(0)),
   6384                           DAG.getSrcValue(I.getArgOperand(1))));
   6385 }
   6386 
   6387 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
   6388 /// implementation, which just calls LowerCall.
   6389 /// FIXME: When all targets are
   6390 /// migrated to using LowerCall, this hook should be integrated into SDISel.
   6391 std::pair<SDValue, SDValue>
   6392 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
   6393   // Handle all of the outgoing arguments.
   6394   CLI.Outs.clear();
   6395   CLI.OutVals.clear();
   6396   ArgListTy &Args = CLI.Args;
   6397   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
   6398     SmallVector<EVT, 4> ValueVTs;
   6399     ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
   6400     for (unsigned Value = 0, NumValues = ValueVTs.size();
   6401          Value != NumValues; ++Value) {
   6402       EVT VT = ValueVTs[Value];
   6403       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
   6404       SDValue Op = SDValue(Args[i].Node.getNode(),
   6405                            Args[i].Node.getResNo() + Value);
   6406       ISD::ArgFlagsTy Flags;
   6407       unsigned OriginalAlignment =
   6408         getDataLayout()->getABITypeAlignment(ArgTy);
   6409 
   6410       if (Args[i].isZExt)
   6411         Flags.setZExt();
   6412       if (Args[i].isSExt)
   6413         Flags.setSExt();
   6414       if (Args[i].isInReg)
   6415         Flags.setInReg();
   6416       if (Args[i].isSRet)
   6417         Flags.setSRet();
   6418       if (Args[i].isByVal) {
   6419         Flags.setByVal();
   6420         PointerType *Ty = cast<PointerType>(Args[i].Ty);
   6421         Type *ElementTy = Ty->getElementType();
   6422         Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
   6423         // For ByVal, alignment should come from FE.  BE will guess if this
   6424         // info is not there but there are cases it cannot get right.
   6425         unsigned FrameAlign;
   6426         if (Args[i].Alignment)
   6427           FrameAlign = Args[i].Alignment;
   6428         else
   6429           FrameAlign = getByValTypeAlignment(ElementTy);
   6430         Flags.setByValAlign(FrameAlign);
   6431       }
   6432       if (Args[i].isNest)
   6433         Flags.setNest();
   6434       Flags.setOrigAlign(OriginalAlignment);
   6435 
   6436       MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
   6437       unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
   6438       SmallVector<SDValue, 4> Parts(NumParts);
   6439       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
   6440 
   6441       if (Args[i].isSExt)
   6442         ExtendKind = ISD::SIGN_EXTEND;
   6443       else if (Args[i].isZExt)
   6444         ExtendKind = ISD::ZERO_EXTEND;
   6445 
   6446       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts,
   6447                      PartVT, CLI.CS ? CLI.CS->getInstruction() : 0, ExtendKind);
   6448 
   6449       for (unsigned j = 0; j != NumParts; ++j) {
   6450         // if it isn't first piece, alignment must be 1
   6451         ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(),
   6452                                i < CLI.NumFixedArgs,
   6453                                i, j*Parts[j].getValueType().getStoreSize());
   6454         if (NumParts > 1 && j == 0)
   6455           MyFlags.Flags.setSplit();
   6456         else if (j != 0)
   6457           MyFlags.Flags.setOrigAlign(1);
   6458 
   6459         CLI.Outs.push_back(MyFlags);
   6460         CLI.OutVals.push_back(Parts[j]);
   6461       }
   6462     }
   6463   }
   6464 
   6465   // Handle the incoming return values from the call.
   6466   CLI.Ins.clear();
   6467   SmallVector<EVT, 4> RetTys;
   6468   ComputeValueVTs(*this, CLI.RetTy, RetTys);
   6469   for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
   6470     EVT VT = RetTys[I];
   6471     MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
   6472     unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
   6473     for (unsigned i = 0; i != NumRegs; ++i) {
   6474       ISD::InputArg MyFlags;
   6475       MyFlags.VT = RegisterVT;
   6476       MyFlags.Used = CLI.IsReturnValueUsed;
   6477       if (CLI.RetSExt)
   6478         MyFlags.Flags.setSExt();
   6479       if (CLI.RetZExt)
   6480         MyFlags.Flags.setZExt();
   6481       if (CLI.IsInReg)
   6482         MyFlags.Flags.setInReg();
   6483       CLI.Ins.push_back(MyFlags);
   6484     }
   6485   }
   6486 
   6487   SmallVector<SDValue, 4> InVals;
   6488   CLI.Chain = LowerCall(CLI, InVals);
   6489 
   6490   // Verify that the target's LowerCall behaved as expected.
   6491   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
   6492          "LowerCall didn't return a valid chain!");
   6493   assert((!CLI.IsTailCall || InVals.empty()) &&
   6494          "LowerCall emitted a return value for a tail call!");
   6495   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
   6496          "LowerCall didn't emit the correct number of values!");
   6497 
   6498   // For a tail call, the return value is merely live-out and there aren't
   6499   // any nodes in the DAG representing it. Return a special value to
   6500   // indicate that a tail call has been emitted and no more Instructions
   6501   // should be processed in the current block.
   6502   if (CLI.IsTailCall) {
   6503     CLI.DAG.setRoot(CLI.Chain);
   6504     return std::make_pair(SDValue(), SDValue());
   6505   }
   6506 
   6507   DEBUG(for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
   6508           assert(InVals[i].getNode() &&
   6509                  "LowerCall emitted a null value!");
   6510           assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
   6511                  "LowerCall emitted a value with the wrong type!");
   6512         });
   6513 
   6514   // Collect the legal value parts into potentially illegal values
   6515   // that correspond to the original function's return values.
   6516   ISD::NodeType AssertOp = ISD::DELETED_NODE;
   6517   if (CLI.RetSExt)
   6518     AssertOp = ISD::AssertSext;
   6519   else if (CLI.RetZExt)
   6520     AssertOp = ISD::AssertZext;
   6521   SmallVector<SDValue, 4> ReturnValues;
   6522   unsigned CurReg = 0;
   6523   for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
   6524     EVT VT = RetTys[I];
   6525     MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
   6526     unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
   6527 
   6528     ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
   6529                                             NumRegs, RegisterVT, VT, NULL,
   6530                                             AssertOp));
   6531     CurReg += NumRegs;
   6532   }
   6533 
   6534   // For a function returning void, there is no return value. We can't create
   6535   // such a node, so we just return a null return value in that case. In
   6536   // that case, nothing will actually look at the value.
   6537   if (ReturnValues.empty())
   6538     return std::make_pair(SDValue(), CLI.Chain);
   6539 
   6540   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
   6541                                 CLI.DAG.getVTList(&RetTys[0], RetTys.size()),
   6542                             &ReturnValues[0], ReturnValues.size());
   6543   return std::make_pair(Res, CLI.Chain);
   6544 }
   6545 
   6546 void TargetLowering::LowerOperationWrapper(SDNode *N,
   6547                                            SmallVectorImpl<SDValue> &Results,
   6548                                            SelectionDAG &DAG) const {
   6549   SDValue Res = LowerOperation(SDValue(N, 0), DAG);
   6550   if (Res.getNode())
   6551     Results.push_back(Res);
   6552 }
   6553 
   6554 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   6555   llvm_unreachable("LowerOperation not implemented for this target!");
   6556 }
   6557 
   6558 void
   6559 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
   6560   SDValue Op = getNonRegisterValue(V);
   6561   assert((Op.getOpcode() != ISD::CopyFromReg ||
   6562           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
   6563          "Copy from a reg to the same reg!");
   6564   assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
   6565 
   6566   RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
   6567   SDValue Chain = DAG.getEntryNode();
   6568   RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0, V);
   6569   PendingExports.push_back(Chain);
   6570 }
   6571 
   6572 #include "llvm/CodeGen/SelectionDAGISel.h"
   6573 
   6574 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
   6575 /// entry block, return true.  This includes arguments used by switches, since
   6576 /// the switch may expand into multiple basic blocks.
   6577 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
   6578   // With FastISel active, we may be splitting blocks, so force creation
   6579   // of virtual registers for all non-dead arguments.
   6580   if (FastISel)
   6581     return A->use_empty();
   6582 
   6583   const BasicBlock *Entry = A->getParent()->begin();
   6584   for (Value::const_use_iterator UI = A->use_begin(), E = A->use_end();
   6585        UI != E; ++UI) {
   6586     const User *U = *UI;
   6587     if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
   6588       return false;  // Use not in entry block.
   6589   }
   6590   return true;
   6591 }
   6592 
   6593 void SelectionDAGISel::LowerArguments(const Function &F) {
   6594   SelectionDAG &DAG = SDB->DAG;
   6595   DebugLoc dl = SDB->getCurDebugLoc();
   6596   const DataLayout *TD = TLI.getDataLayout();
   6597   SmallVector<ISD::InputArg, 16> Ins;
   6598 
   6599   if (!FuncInfo->CanLowerReturn) {
   6600     // Put in an sret pointer parameter before all the other parameters.
   6601     SmallVector<EVT, 1> ValueVTs;
   6602     ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
   6603 
   6604     // NOTE: Assuming that a pointer will never break down to more than one VT
   6605     // or one register.
   6606     ISD::ArgFlagsTy Flags;
   6607     Flags.setSRet();
   6608     MVT RegisterVT = TLI.getRegisterType(*DAG.getContext(), ValueVTs[0]);
   6609     ISD::InputArg RetArg(Flags, RegisterVT, true, 0, 0);
   6610     Ins.push_back(RetArg);
   6611   }
   6612 
   6613   // Set up the incoming argument description vector.
   6614   unsigned Idx = 1;
   6615   for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
   6616        I != E; ++I, ++Idx) {
   6617     SmallVector<EVT, 4> ValueVTs;
   6618     ComputeValueVTs(TLI, I->getType(), ValueVTs);
   6619     bool isArgValueUsed = !I->use_empty();
   6620     for (unsigned Value = 0, NumValues = ValueVTs.size();
   6621          Value != NumValues; ++Value) {
   6622       EVT VT = ValueVTs[Value];
   6623       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
   6624       ISD::ArgFlagsTy Flags;
   6625       unsigned OriginalAlignment =
   6626         TD->getABITypeAlignment(ArgTy);
   6627 
   6628       if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
   6629         Flags.setZExt();
   6630       if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
   6631         Flags.setSExt();
   6632       if (F.getAttributes().hasAttribute(Idx, Attribute::InReg))
   6633         Flags.setInReg();
   6634       if (F.getAttributes().hasAttribute(Idx, Attribute::StructRet))
   6635         Flags.setSRet();
   6636       if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal)) {
   6637         Flags.setByVal();
   6638         PointerType *Ty = cast<PointerType>(I->getType());
   6639         Type *ElementTy = Ty->getElementType();
   6640         Flags.setByValSize(TD->getTypeAllocSize(ElementTy));
   6641         // For ByVal, alignment should be passed from FE.  BE will guess if
   6642         // this info is not there but there are cases it cannot get right.
   6643         unsigned FrameAlign;
   6644         if (F.getParamAlignment(Idx))
   6645           FrameAlign = F.getParamAlignment(Idx);
   6646         else
   6647           FrameAlign = TLI.getByValTypeAlignment(ElementTy);
   6648         Flags.setByValAlign(FrameAlign);
   6649       }
   6650       if (F.getAttributes().hasAttribute(Idx, Attribute::Nest))
   6651         Flags.setNest();
   6652       Flags.setOrigAlign(OriginalAlignment);
   6653 
   6654       MVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
   6655       unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT);
   6656       for (unsigned i = 0; i != NumRegs; ++i) {
   6657         ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed,
   6658                               Idx-1, i*RegisterVT.getStoreSize());
   6659         if (NumRegs > 1 && i == 0)
   6660           MyFlags.Flags.setSplit();
   6661         // if it isn't first piece, alignment must be 1
   6662         else if (i > 0)
   6663           MyFlags.Flags.setOrigAlign(1);
   6664         Ins.push_back(MyFlags);
   6665       }
   6666     }
   6667   }
   6668 
   6669   // Call the target to set up the argument values.
   6670   SmallVector<SDValue, 8> InVals;
   6671   SDValue NewRoot = TLI.LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
   6672                                              F.isVarArg(), Ins,
   6673                                              dl, DAG, InVals);
   6674 
   6675   // Verify that the target's LowerFormalArguments behaved as expected.
   6676   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
   6677          "LowerFormalArguments didn't return a valid chain!");
   6678   assert(InVals.size() == Ins.size() &&
   6679          "LowerFormalArguments didn't emit the correct number of values!");
   6680   DEBUG({
   6681       for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
   6682         assert(InVals[i].getNode() &&
   6683                "LowerFormalArguments emitted a null value!");
   6684         assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
   6685                "LowerFormalArguments emitted a value with the wrong type!");
   6686       }
   6687     });
   6688 
   6689   // Update the DAG with the new chain value resulting from argument lowering.
   6690   DAG.setRoot(NewRoot);
   6691 
   6692   // Set up the argument values.
   6693   unsigned i = 0;
   6694   Idx = 1;
   6695   if (!FuncInfo->CanLowerReturn) {
   6696     // Create a virtual register for the sret pointer, and put in a copy
   6697     // from the sret argument into it.
   6698     SmallVector<EVT, 1> ValueVTs;
   6699     ComputeValueVTs(TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
   6700     MVT VT = ValueVTs[0].getSimpleVT();
   6701     MVT RegVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
   6702     ISD::NodeType AssertOp = ISD::DELETED_NODE;
   6703     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
   6704                                         RegVT, VT, NULL, AssertOp);
   6705 
   6706     MachineFunction& MF = SDB->DAG.getMachineFunction();
   6707     MachineRegisterInfo& RegInfo = MF.getRegInfo();
   6708     unsigned SRetReg = RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT));
   6709     FuncInfo->DemoteRegister = SRetReg;
   6710     NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurDebugLoc(),
   6711                                     SRetReg, ArgValue);
   6712     DAG.setRoot(NewRoot);
   6713 
   6714     // i indexes lowered arguments.  Bump it past the hidden sret argument.
   6715     // Idx indexes LLVM arguments.  Don't touch it.
   6716     ++i;
   6717   }
   6718 
   6719   for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
   6720       ++I, ++Idx) {
   6721     SmallVector<SDValue, 4> ArgValues;
   6722     SmallVector<EVT, 4> ValueVTs;
   6723     ComputeValueVTs(TLI, I->getType(), ValueVTs);
   6724     unsigned NumValues = ValueVTs.size();
   6725 
   6726     // If this argument is unused then remember its value. It is used to generate
   6727     // debugging information.
   6728     if (I->use_empty() && NumValues)
   6729       SDB->setUnusedArgValue(I, InVals[i]);
   6730 
   6731     for (unsigned Val = 0; Val != NumValues; ++Val) {
   6732       EVT VT = ValueVTs[Val];
   6733       MVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
   6734       unsigned NumParts = TLI.getNumRegisters(*CurDAG->getContext(), VT);
   6735 
   6736       if (!I->use_empty()) {
   6737         ISD::NodeType AssertOp = ISD::DELETED_NODE;
   6738         if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
   6739           AssertOp = ISD::AssertSext;
   6740         else if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
   6741           AssertOp = ISD::AssertZext;
   6742 
   6743         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i],
   6744                                              NumParts, PartVT, VT,
   6745                                              NULL, AssertOp));
   6746       }
   6747 
   6748       i += NumParts;
   6749     }
   6750 
   6751     // We don't need to do anything else for unused arguments.
   6752     if (ArgValues.empty())
   6753       continue;
   6754 
   6755     // Note down frame index.
   6756     if (FrameIndexSDNode *FI =
   6757         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
   6758       FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
   6759 
   6760     SDValue Res = DAG.getMergeValues(&ArgValues[0], NumValues,
   6761                                      SDB->getCurDebugLoc());
   6762 
   6763     SDB->setValue(I, Res);
   6764     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
   6765       if (LoadSDNode *LNode =
   6766           dyn_cast<LoadSDNode>(Res.getOperand(0).getNode()))
   6767         if (FrameIndexSDNode *FI =
   6768             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
   6769         FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
   6770     }
   6771 
   6772     // If this argument is live outside of the entry block, insert a copy from
   6773     // wherever we got it to the vreg that other BB's will reference it as.
   6774     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
   6775       // If we can, though, try to skip creating an unnecessary vreg.
   6776       // FIXME: This isn't very clean... it would be nice to make this more
   6777       // general.  It's also subtly incompatible with the hacks FastISel
   6778       // uses with vregs.
   6779       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
   6780       if (TargetRegisterInfo::isVirtualRegister(Reg)) {
   6781         FuncInfo->ValueMap[I] = Reg;
   6782         continue;
   6783       }
   6784     }
   6785     if (!isOnlyUsedInEntryBlock(I, TM.Options.EnableFastISel)) {
   6786       FuncInfo->InitializeRegForValue(I);
   6787       SDB->CopyToExportRegsIfNeeded(I);
   6788     }
   6789   }
   6790 
   6791   assert(i == InVals.size() && "Argument register count mismatch!");
   6792 
   6793   // Finally, if the target has anything special to do, allow it to do so.
   6794   // FIXME: this should insert code into the DAG!
   6795   EmitFunctionEntryCode();
   6796 }
   6797 
   6798 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
   6799 /// ensure constants are generated when needed.  Remember the virtual registers
   6800 /// that need to be added to the Machine PHI nodes as input.  We cannot just
   6801 /// directly add them, because expansion might result in multiple MBB's for one
   6802 /// BB.  As such, the start of the BB might correspond to a different MBB than
   6803 /// the end.
   6804 ///
   6805 void
   6806 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
   6807   const TerminatorInst *TI = LLVMBB->getTerminator();
   6808 
   6809   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
   6810 
   6811   // Check successor nodes' PHI nodes that expect a constant to be available
   6812   // from this block.
   6813   for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
   6814     const BasicBlock *SuccBB = TI->getSuccessor(succ);
   6815     if (!isa<PHINode>(SuccBB->begin())) continue;
   6816     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
   6817 
   6818     // If this terminator has multiple identical successors (common for
   6819     // switches), only handle each succ once.
   6820     if (!SuccsHandled.insert(SuccMBB)) continue;
   6821 
   6822     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
   6823 
   6824     // At this point we know that there is a 1-1 correspondence between LLVM PHI
   6825     // nodes and Machine PHI nodes, but the incoming operands have not been
   6826     // emitted yet.
   6827     for (BasicBlock::const_iterator I = SuccBB->begin();
   6828          const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
   6829       // Ignore dead phi's.
   6830       if (PN->use_empty()) continue;
   6831 
   6832       // Skip empty types
   6833       if (PN->getType()->isEmptyTy())
   6834         continue;
   6835 
   6836       unsigned Reg;
   6837       const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
   6838 
   6839       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
   6840         unsigned &RegOut = ConstantsOut[C];
   6841         if (RegOut == 0) {
   6842           RegOut = FuncInfo.CreateRegs(C->getType());
   6843           CopyValueToVirtualRegister(C, RegOut);
   6844         }
   6845         Reg = RegOut;
   6846       } else {
   6847         DenseMap<const Value *, unsigned>::iterator I =
   6848           FuncInfo.ValueMap.find(PHIOp);
   6849         if (I != FuncInfo.ValueMap.end())
   6850           Reg = I->second;
   6851         else {
   6852           assert(isa<AllocaInst>(PHIOp) &&
   6853                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
   6854                  "Didn't codegen value into a register!??");
   6855           Reg = FuncInfo.CreateRegs(PHIOp->getType());
   6856           CopyValueToVirtualRegister(PHIOp, Reg);
   6857         }
   6858       }
   6859 
   6860       // Remember that this register needs to added to the machine PHI node as
   6861       // the input for this MBB.
   6862       SmallVector<EVT, 4> ValueVTs;
   6863       ComputeValueVTs(TLI, PN->getType(), ValueVTs);
   6864       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
   6865         EVT VT = ValueVTs[vti];
   6866         unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
   6867         for (unsigned i = 0, e = NumRegisters; i != e; ++i)
   6868           FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
   6869         Reg += NumRegisters;
   6870       }
   6871     }
   6872   }
   6873   ConstantsOut.clear();
   6874 }
   6875