//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCTargetMachine.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                     CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags,
                                     CCState &State);
static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                            MVT &LocVT,
                                            CCValAssign::LocInfo &LocInfo,
                                            ISD::ArgFlagsTy &ArgFlags,
                                            CCState &State);
static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                              MVT &LocVT,
                                              CCValAssign::LocInfo &LocInfo,
                                              ISD::ArgFlagsTy &ArgFlags,
                                              CCState &State);

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
  cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
  cl::desc("disable setting the node scheduling preference to ILP on PPC"),
  cl::Hidden);

static TargetLoweringObjectFile *CreateTLOF(const PPCTargetMachine &TM) {
  if (TM.getSubtargetImpl()->isDarwin())
    return new TargetLoweringObjectFileMachO();

  return new TargetLoweringObjectFileELF();
}

PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
  : TargetLowering(TM, CreateTLOF(TM)), PPCSubTarget(*TM.getSubtargetImpl()) {
  const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();

  setPow2DivIsCheap();

  // Use _setjmp/_longjmp instead of setjmp/longjmp.
  setUseUnderscoreSetJmp(true);
  setUseUnderscoreLongJmp(true);

  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget->isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? 8 : 4);

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
  addRegisterClass(MVT::f64, &PPC::F8RCRegClass);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);

  // This is used in the ppcf128->int sequence.  Note it has different semantics
  // from FP_ROUND:  that rounds to nearest, this rounds to zero.
  setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FMA  , MVT::f64, Legal);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  setOperationAction(ISD::FMA  , MVT::f32, Legal);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If the target lacks a hardware square root, expand FSQRT to a libcall.
  if (!Subtarget->hasFSQRT()) {
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  }

  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

  // PowerPC does not have BSWAP, CTPOP or CTTZ
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
  setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);

  // PowerPC does not have Select
  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT, MVT::i64, Expand);
  setOperationAction(ISD::SELECT, MVT::f32, Expand);
  setOperationAction(ISD::SELECT, MVT::f64, Expand);

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);

  // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

  // PowerPC does not have [U|S]INT_TO_FP
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::f64, Expand);

  // We cannot sextinreg(i1).  Expand to shifts.
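  // An illustrative sketch of the generic expansion on an i32 value x:
  // sext_inreg(x, i1) becomes (ashr (shl x, 31), 31), replicating bit 0
  // across the whole register.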
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  if (Subtarget->isSVR4ABI()) {
    if (isPPC64) {
      // VAARG always uses double-word chunks, so promote anything smaller.
      setOperationAction(ISD::VAARG, MVT::i1, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i8, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i16, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::i32, Promote);
      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
      setOperationAction(ISD::VAARG, MVT::Other, Expand);
    } else {
      // VAARG is custom lowered with the 32-bit SVR4 ABI.
      setOperationAction(ISD::VAARG, MVT::Other, Custom);
      setOperationAction(ISD::VAARG, MVT::i64, Custom);
    }
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget->has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    // FIXME: disable this lowered code.  This generates 64-bit register values,
    // and we don't model the fact that the top part is clobbered by calls.  We
    // need to flag these together so that the value isn't live across a call.
    //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  if (Subtarget->use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget->hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (unsigned i = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
      MVT::SimpleValueType VT = (MVT::SimpleValueType)i;

      // add/sub are legal for all supported vector VTs.
      setOperationAction(ISD::ADD , VT, Legal);
      setOperationAction(ISD::SUB , VT, Legal);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32, Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
  }

  if (Subtarget->has64BitSupport()) {
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  }

  setOperationAction(ISD::ATOMIC_LOAD,  MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  if (isPPC64) {
    setStackPointerRegisterToSaveRestore(PPC::X1);
    setExceptionPointerRegister(PPC::X3);
    setExceptionSelectorRegister(PPC::X4);
  } else {
    setStackPointerRegisterToSaveRestore(PPC::R1);
    setExceptionPointerRegister(PPC::R3);
    setExceptionSelectorRegister(PPC::R4);
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  setTargetDAGCombine(ISD::BSWAP);

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget->isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  setMinFunctionAlignment(2);
  if (PPCSubTarget.isDarwin())
    setPrefFunctionAlignment(4);

  if (isPPC64 && Subtarget->isJITCodeModel())
    // Temporary workaround for the inability of PPC64 JIT to handle jump
    // tables.
    setSupportJumpTables(false);

  setInsertFencesForAtomic(true);

  setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties();

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget->getDarwinDirective() == PPC::DIR_E500mc ||
      Subtarget->getDarwinDirective() == PPC::DIR_E5500) {
    maxStoresPerMemset = 32;
    maxStoresPerMemsetOptSize = 16;
    maxStoresPerMemcpy = 32;
    maxStoresPerMemcpyOptSize = 8;
    maxStoresPerMemmove = 32;
    maxStoresPerMemmoveOptSize = 8;

    setPrefFunctionAlignment(4);
    benefitFromCodePlacementOpt = true;
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty) const {
  const TargetMachine &TM = getTargetMachine();
  // Darwin passes everything on a 4-byte boundary.
  if (TM.getSubtarget<PPCSubtarget>().isDarwin())
    return 4;

  // 16-byte and wider vectors are passed on a 16-byte boundary.
  if (VectorType *VTy = dyn_cast<VectorType>(Ty))
    if (VTy->getBitWidth() >= 128)
      return 16;

  // Everything else is aligned to 8 bytes on PPC64 and to 4 bytes on PPC32.
  if (PPCSubTarget.isPPC64())
    return 8;

  return 4;
}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VMADDFP:         return "PPCISD::VMADDFP";
  case PPCISD::VNMSUBFP:        return "PPCISD::VNMSUBFP";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::TOC_RESTORE:     return "PPCISD::TOC_RESTORE";
  case PPCISD::LOAD:            return "PPCISD::LOAD";
  case PPCISD::LOAD_TOC:        return "PPCISD::LOAD_TOC";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::EXTSW_32:        return "PPCISD::EXTSW_32";
  case PPCISD::STD_32:          return "PPCISD::STD_32";
  case PPCISD::CALL_SVR4:       return "PPCISD::CALL_SVR4";
  case PPCISD::CALL_NOP_SVR4:   return "PPCISD::CALL_NOP_SVR4";
  case PPCISD::CALL_Darwin:     return "PPCISD::CALL_Darwin";
  case PPCISD::NOP:             return "PPCISD::NOP";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL_Darwin:    return "PPCISD::BCTRL_Darwin";
  case PPCISD::BCTRL_SVR4:      return "PPCISD::BCTRL_SVR4";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::MFCR:            return "PPCISD::MFCR";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LARX:            return "PPCISD::LARX";
  case PPCISD::STCX:            return "PPCISD::STCX";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::MTFSB0:          return "PPCISD::MTFSB0";
  case PPCISD::MTFSB1:          return "PPCISD::MTFSB1";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::MTFSF:           return "PPCISD::MTFSF";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  }
}

EVT PPCTargetLowering::getSetCCResultType(EVT VT) const {
  return MVT::i32;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Return true if the mask element Op is undef (encoded
/// as a negative value) or if it equals the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
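/// For example (illustrative), the non-unary form matches the mask
/// {1,3,5,...,31}, i.e. the odd-numbered bytes of the two concatenated
/// inputs; the unary form matches {1,3,...,15} repeated for both halves.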
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),  i*2+1))
        return false;
  } else {
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),    i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+1))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
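/// For example (illustrative), the non-unary form matches the mask
/// {2,3,6,7,10,11,...,30,31}, selecting the low halfword of each word of the
/// two concatenated inputs.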
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
  if (!isUnary) {
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
        return false;
  } else {
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
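/// For example (illustrative), a vmrglb-style merge (UnitSize == 1,
/// LHSStart == 8, RHSStart == 24) matches the mask {8,24,9,25,...,15,31},
/// interleaving the low halves of the two inputs byte by byte.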
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 8, 24);
  return isVMerge(N, UnitSize, 8, 8);
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             bool isUnary) {
  if (!isUnary)
    return isVMerge(N, UnitSize, 0, 16);
  return isVMerge(N, UnitSize, 0, 0);
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
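/// For example (illustrative), the two-input mask {3,4,5,...,18} is a vsldoi
/// by three bytes, so this returns 3.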
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         "PPC only supports shuffles by bytes!");

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  if (!isUnary) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  }
  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
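/// For example (illustrative), with EltSize == 4 the mask
/// {4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7} is a splat of word 1 of the input.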
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 &&
         (EltSize == 1 || EltSize == 2 || EltSize == 4));

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte element
  // splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}

/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);

  APInt APVal, APUndef;
  unsigned BitSize;
  bool HasAnyUndefs;

  if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
    if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
      return CFP->getValueAPF().isNegZero();

  return false;
}

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  return SVOp->getMaskElt(0) / EltSize;
}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
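/// For example (illustrative), a build_vector splatting the i32 constant
/// 0x01010101, queried with ByteSize == 1, returns the constant 1, which is
/// realizable as "vspltisb 1".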
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(0, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if the corresponding elements of each chunk of the buildvector agree.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (UniquedVals[i&(Multiple-1)].getNode() == 0)
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (UniquedVals[i].getNode() == 0) continue;  // Must have been undefs.

      LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
      LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(0, MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)
        return DAG.getTargetConstant(Val, MVT::i32);  // 0,0,0,4 -> vspltisw(4)
    }
    if (LeadingOnes) {
      if (UniquedVals[Multiple-1].getNode() == 0)
        return DAG.getTargetConstant(~0U, MVT::i32);  // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
    if (OpVal.getNode() == 0)
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (OpVal.getNode() == 0) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case in which we could fit the replicated bits into
  // our immediate field would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, cut it in half and
  // check to see if the two halves are equal.  Continue doing this until we
  // get to ByteSize.  This allows us to handle 0x01010101 as 0x01.
  while (ValSizeInBytes > ByteSize) {
    ValSizeInBytes >>= 1;

    // If the top half equals the bottom half, we're still ok.
    if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) !=
         (Value                        & ((1 << (8*ValSizeInBytes))-1)))
      return SDValue();
  }

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5-bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, MVT::i32);
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
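/// For example (illustrative), the i32 constant -32768 (0xFFFF8000) is
/// accepted with Imm = -32768, while 32768 (0x00008000) is rejected because
/// it does not survive the round trip through a signed 16-bit value.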
static bool isIntS16Immediate(SDNode *N, short &Imm) {
  if (N->getOpcode() != ISD::Constant)
    return false;

  Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
static bool isIntS16Immediate(SDValue Op, short &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented with [r+imm].
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index,
                                            SelectionDAG &DAG) const {
  short imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false;    // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm))
      return false;    // r+i; fold it into [r+imm] if we can.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are provably
    // disjoint.
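    // Illustrative example: if the low four bits of X are known zero, then
    // (or X, 12) computes the same value as (add X, 12), so the address can
    // use the [r+r] form as if it were an add.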
    APInt LHSKnownZero, LHSKnownOne;
    APInt RHSKnownZero, RHSKnownOne;
    DAG.ComputeMaskedBits(N.getOperand(0),
                          LHSKnownZero, LHSKnownOne);

    if (LHSKnownZero.getBoolValue()) {
      DAG.ComputeMaskedBits(N.getOperand(1),
                            RHSKnownZero, RHSKnownOne);
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnownZero | RHSKnownZero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG) const {
  // FIXME dl should come from parent load or store, not from address
  DebugLoc dl = N.getDebugLoc();
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);

      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant((int)imm & 0xFFFF, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0"
    short Imm;
    if (isIntS16Immediate(CN, Imm)) {
      Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
      Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0,
                             CN->getValueType(0));
      return true;
    }

    // Handle 32-bit sext immediates with LIS + addr mode.
    if (CN->getValueType(0) == MVT::i32 ||
        (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;      // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the operand is an addition, always emit this as [r+r], since this is
  // better (for code size, and execution, as the memop does the add for free)
  // than emitting an explicit add.
  if (N.getOpcode() == ISD::ADD) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0,
                         N.getValueType());
  Index = N;
  return true;
}

/// SelectAddressRegImmShift - Returns true if the address N can be
/// represented by a base register plus a signed 14-bit displacement
/// [r+imm*4].  Suitable for use by STD and friends.
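/// For example (illustrative), an access at [r3+16] is encoded with
/// Disp = 16 >> 2 = 4 in the shifted 14-bit DS displacement field.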
bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
                                                 SDValue &Base,
                                                 SelectionDAG &DAG) const {
  // FIXME dl should come from the parent load or store, not the address
  DebugLoc dl = N.getDebugLoc();
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true;  // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    short imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      APInt LHSKnownZero, LHSKnownOne;
      DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        Base = N.getOperand(0);
        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.  Verify low two bits are clear.
    if ((CN->getZExtValue() & 3) == 0) {
      // If this address fits entirely in a 14-bit sext immediate field, codegen
      // this as "d, 0"
      short Imm;
      if (isIntS16Immediate(CN, Imm)) {
        Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
        Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::X0 : PPC::R0,
                               CN->getValueType(0));
        return true;
      }

      // Fold the low-part of 32-bit absolute addresses into addr mode.
      if (CN->getValueType(0) == MVT::i32 ||
          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
        int Addr = (int)CN->getZExtValue();

        // Otherwise, break this down into an LIS + disp.
        Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
        Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
        unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
        Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base),0);
        return true;
      }
    }
  }

  Disp = DAG.getTargetConstant(0, getPointerTy());
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
  else
    Base = N;
  return true;      // [r+0]
}

/// getPreIndexedAddressParts - Returns true, and sets the base pointer,
/// offset pointer, and addressing mode by reference, if this node's address
/// can be legally represented as a pre-indexed load/store address.
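/// For example (illustrative), "lwzu r5, 4(r3)" loads from r3+4 and writes
/// the updated address back into r3 in the same instruction.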
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  SDValue Ptr;
  EVT VT;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();

  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT  = ST->getMemoryVT();
  } else
    return false;

  // PowerPC doesn't have preinc load/store instructions for vectors.
  if (VT.isVector())
    return false;

  if (SelectAddressRegReg(Ptr, Offset, Base, DAG)) {
    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU use reg+imm*4, others use reg+imm.
  if (VT != MVT::i64) {
    // reg + imm
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
      return false;
  } else {
    // reg + imm * 4.
    if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

   1167 /// GetLabelAccessInfo - Return true if we should reference labels using a
   1168 /// PICBase, and set HiOpFlags and LoOpFlags to the target MO flags.
   1169 static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
   1170                                unsigned &LoOpFlags, const GlobalValue *GV = 0) {
   1171   HiOpFlags = PPCII::MO_HA16;
   1172   LoOpFlags = PPCII::MO_LO16;
   1173 
   1174   // Don't use the PIC base if we're not using the PIC relocation model, or
   1175   // if we are on a non-Darwin platform; we don't support PIC elsewhere yet.
   1176   bool isPIC = TM.getRelocationModel() == Reloc::PIC_ &&
   1177                TM.getSubtarget<PPCSubtarget>().isDarwin();
   1178   if (isPIC) {
   1179     HiOpFlags |= PPCII::MO_PIC_FLAG;
   1180     LoOpFlags |= PPCII::MO_PIC_FLAG;
   1181   }
   1182 
   1183   // If this is a reference to a global value that requires a non-lazy-ptr, make
   1184   // sure that instruction lowering adds it.
   1185   if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
   1186     HiOpFlags |= PPCII::MO_NLP_FLAG;
   1187     LoOpFlags |= PPCII::MO_NLP_FLAG;
   1188 
   1189     if (GV->hasHiddenVisibility()) {
   1190       HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
   1191       LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
   1192     }
   1193   }
   1194 
   1195   return isPIC;
   1196 }
   1197 
   1198 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
   1199                              SelectionDAG &DAG) {
   1200   EVT PtrVT = HiPart.getValueType();
   1201   SDValue Zero = DAG.getConstant(0, PtrVT);
   1202   DebugLoc DL = HiPart.getDebugLoc();
   1203 
   1204   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
   1205   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
   1206 
   1207   // With PIC, the first instruction is actually "GR+hi(&G)".
   1208   if (isPIC)
   1209     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
   1210                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
   1211 
   1212   // Generate non-pic code that has direct accesses to the constant pool.
   1213   // The address of the global is just (hi(&g)+lo(&g)).
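          // E.g. (a sketch in Darwin asm syntax): "lis r3, ha16(_g)" followed by
          // "addi r3, r3, lo16(_g)"; ha16 is pre-adjusted so that adding the
          // sign-extended lo16 half still yields the full 32-bit address.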
   1214   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
   1215 }
   1216 
   1217 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
   1218                                              SelectionDAG &DAG) const {
   1219   EVT PtrVT = Op.getValueType();
   1220   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
   1221   const Constant *C = CP->getConstVal();
   1222 
   1223   // 64-bit SVR4 ABI code is always position-independent.
   1224   // The actual address of the GlobalValue is stored in the TOC.
   1225   if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
   1226     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
   1227     return DAG.getNode(PPCISD::TOC_ENTRY, CP->getDebugLoc(), MVT::i64, GA,
   1228                        DAG.getRegister(PPC::X2, MVT::i64));
   1229   }
   1230 
   1231   unsigned MOHiFlag, MOLoFlag;
   1232   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
   1233   SDValue CPIHi =
   1234     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
   1235   SDValue CPILo =
   1236     DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
   1237   return LowerLabelRef(CPIHi, CPILo, isPIC, DAG);
   1238 }
   1239 
   1240 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
   1241   EVT PtrVT = Op.getValueType();
   1242   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
   1243 
   1244   // 64-bit SVR4 ABI code is always position-independent.
   1245   // The actual address of the GlobalValue is stored in the TOC.
   1246   if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
   1247     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
   1248     return DAG.getNode(PPCISD::TOC_ENTRY, JT->getDebugLoc(), MVT::i64, GA,
   1249                        DAG.getRegister(PPC::X2, MVT::i64));
   1250   }
   1251 
   1252   unsigned MOHiFlag, MOLoFlag;
   1253   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
   1254   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
   1255   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
   1256   return LowerLabelRef(JTIHi, JTILo, isPIC, DAG);
   1257 }
   1258 
   1259 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
   1260                                              SelectionDAG &DAG) const {
   1261   EVT PtrVT = Op.getValueType();
   1262 
   1263   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
   1264 
   1265   unsigned MOHiFlag, MOLoFlag;
   1266   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
   1267   SDValue TgtBAHi = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOHiFlag);
   1268   SDValue TgtBALo = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOLoFlag);
   1269   return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
   1270 }
   1271 
   1272 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
   1273                                               SelectionDAG &DAG) const {
   1274 
   1275   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
   1276   DebugLoc dl = GA->getDebugLoc();
   1277   const GlobalValue *GV = GA->getGlobal();
   1278   EVT PtrVT = getPointerTy();
   1279   bool is64bit = PPCSubTarget.isPPC64();
   1280 
   1281   TLSModel::Model model = getTargetMachine().getTLSModel(GV);
   1282 
   1283   SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
   1284                                              PPCII::MO_TPREL16_HA);
   1285   SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
   1286                                              PPCII::MO_TPREL16_LO);
   1287 
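          // In the local-exec model the variable lives at a fixed offset from the
          // thread pointer (r13 on ppc64, r2 on ppc32 here), so the address is just
          // thread_ptr + hi(tprel) + lo(tprel); e.g., roughly
          // "addis rX, r13, g@tprel@ha" / "addi rX, rX, g@tprel@l" (a sketch in
          // ELF asm syntax).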
   1288   if (model != TLSModel::LocalExec)
   1289     llvm_unreachable("only local-exec TLS mode supported");
   1290   SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2,
   1291                                    is64bit ? MVT::i64 : MVT::i32);
   1292   SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
   1293   return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
   1294 }
   1295 
   1296 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
   1297                                               SelectionDAG &DAG) const {
   1298   EVT PtrVT = Op.getValueType();
   1299   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
   1300   DebugLoc DL = GSDN->getDebugLoc();
   1301   const GlobalValue *GV = GSDN->getGlobal();
   1302 
   1303   // 64-bit SVR4 ABI code is always position-independent.
   1304   // The actual address of the GlobalValue is stored in the TOC.
   1305   if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
   1306     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
   1307     return DAG.getNode(PPCISD::TOC_ENTRY, DL, MVT::i64, GA,
   1308                        DAG.getRegister(PPC::X2, MVT::i64));
   1309   }
   1310 
   1311   unsigned MOHiFlag, MOLoFlag;
   1312   bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag, GV);
   1313 
   1314   SDValue GAHi =
   1315     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
   1316   SDValue GALo =
   1317     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
   1318 
   1319   SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);
   1320 
   1321   // If the global reference is actually to a non-lazy-pointer, we have to do an
   1322   // extra load to get the address of the global.
   1323   if (MOHiFlag & PPCII::MO_NLP_FLAG)
   1324     Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(),
   1325                       false, false, false, 0);
   1326   return Ptr;
   1327 }
   1328 
   1329 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
   1330   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
   1331   DebugLoc dl = Op.getDebugLoc();
   1332 
   1333   // If we're comparing for equality to zero, expose the fact that this is
   1334   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
   1335   // fold the new nodes.
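          // E.g., for an i32 "x == 0": ctlz(0) is 32 and 32 >> 5 == 1, while ctlz
          // of any nonzero i32 is at most 31 and 31 >> 5 == 0, so
          // "ctlz(x) >> log2(bitwidth)" is exactly the desired boolean.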
   1336   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
   1337     if (C->isNullValue() && CC == ISD::SETEQ) {
   1338       EVT VT = Op.getOperand(0).getValueType();
   1339       SDValue Zext = Op.getOperand(0);
   1340       if (VT.bitsLT(MVT::i32)) {
   1341         VT = MVT::i32;
   1342         Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0));
   1343       }
   1344       unsigned Log2b = Log2_32(VT.getSizeInBits());
   1345       SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext);
   1346       SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz,
   1347                                 DAG.getConstant(Log2b, MVT::i32));
   1348       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc);
   1349     }
   1350     // Leave comparisons against 0 and -1 alone for now, since they're usually
   1351     // optimized.  FIXME: revisit this when we can custom lower all setcc
   1352     // optimizations.
   1353     if (C->isAllOnesValue() || C->isNullValue())
   1354       return SDValue();
   1355   }
   1356 
   1357   // If we have an integer seteq/setne, turn it into a compare against zero
   1358   // by xor'ing the rhs with the lhs, which is faster than setting a
   1359   // condition register, reading it back out, and masking the correct bit.  The
   1360   // normal approach here uses sub to do this instead of xor.  Using xor exposes
   1361   // the result to other bit-twiddling opportunities.
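          // E.g., "a == b" becomes "(a ^ b) == 0": the xor is zero exactly when
          // every bit agrees, and unlike a subtraction it never propagates carries,
          // which keeps the result friendly to later bitwise combines.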
   1362   EVT LHSVT = Op.getOperand(0).getValueType();
   1363   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
   1364     EVT VT = Op.getValueType();
   1365     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
   1366                                 Op.getOperand(1));
   1367     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, LHSVT), CC);
   1368   }
   1369   return SDValue();
   1370 }
   1371 
   1372 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG,
   1373                                       const PPCSubtarget &Subtarget) const {
   1374   SDNode *Node = Op.getNode();
   1375   EVT VT = Node->getValueType(0);
   1376   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   1377   SDValue InChain = Node->getOperand(0);
   1378   SDValue VAListPtr = Node->getOperand(1);
   1379   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
   1380   DebugLoc dl = Node->getDebugLoc();
   1381 
   1382   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
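          // Sketch of the 32-bit SVR4 scheme for an integer va_arg (illustrative):
          // read gpr index g from the va_list; if g < 8 the value lives at
          // reg_save_area + 4*g, otherwise it lives in overflow_arg_area; bump
          // whichever index/pointer was used, then load from the chosen address.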
   1383 
   1384   // gpr_index
   1385   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
   1386                                     VAListPtr, MachinePointerInfo(SV), MVT::i8,
   1387                                     false, false, 0);
   1388   InChain = GprIndex.getValue(1);
   1389 
   1390   if (VT == MVT::i64) {
   1391     // Check if GprIndex is odd (i64 values must start at an even index)
   1392     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
   1393                                  DAG.getConstant(1, MVT::i32));
   1394     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
   1395                                 DAG.getConstant(0, MVT::i32), ISD::SETNE);
   1396     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
   1397                                           DAG.getConstant(1, MVT::i32));
   1398     // Align GprIndex to be even if it isn't
   1399     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
   1400                            GprIndex);
   1401   }
   1402 
   1403   // fpr index is 1 byte after gpr
   1404   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
   1405                                DAG.getConstant(1, MVT::i32));
   1406 
   1407   // fpr
   1408   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
   1409                                     FprPtr, MachinePointerInfo(SV), MVT::i8,
   1410                                     false, false, 0);
   1411   InChain = FprIndex.getValue(1);
   1412 
   1413   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
   1414                                        DAG.getConstant(8, MVT::i32));
   1415 
   1416   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
   1417                                         DAG.getConstant(4, MVT::i32));
   1418 
   1419   // areas
   1420   SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
   1421                                      MachinePointerInfo(), false, false,
   1422                                      false, 0);
   1423   InChain = OverflowArea.getValue(1);
   1424 
   1425   SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
   1426                                     MachinePointerInfo(), false, false,
   1427                                     false, 0);
   1428   InChain = RegSaveArea.getValue(1);
   1429 
   1430   // select overflow_area if index >= 8
   1431   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
   1432                             DAG.getConstant(8, MVT::i32), ISD::SETLT);
   1433 
   1434   // adjustment constant gpr_index * 4/8
   1435   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
   1436                                     VT.isInteger() ? GprIndex : FprIndex,
   1437                                     DAG.getConstant(VT.isInteger() ? 4 : 8,
   1438                                                     MVT::i32));
   1439 
   1440   // OurReg = RegSaveArea + RegConstant
   1441   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
   1442                                RegConstant);
   1443 
   1444   // Floating types are 32 bytes into RegSaveArea
   1445   if (VT.isFloatingPoint())
   1446     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
   1447                          DAG.getConstant(32, MVT::i32));
   1448 
   1449   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
   1450   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
   1451                                    VT.isInteger() ? GprIndex : FprIndex,
   1452                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1,
   1453                                                    MVT::i32));
   1454 
   1455   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
   1456                               VT.isInteger() ? VAListPtr : FprPtr,
   1457                               MachinePointerInfo(SV),
   1458                               MVT::i8, false, false, 0);
   1459 
   1460   // determine if we should load from reg_save_area or overflow_area
   1461   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
   1462 
   1463   // increase overflow_area by 4/8 if gpr/fpr index >= 8
   1464   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
   1465                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
   1466                                           MVT::i32));
   1467 
   1468   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
   1469                              OverflowAreaPlusN);
   1470 
   1471   InChain = DAG.getTruncStore(InChain, dl, OverflowArea,
   1472                               OverflowAreaPtr,
   1473                               MachinePointerInfo(),
   1474                               MVT::i32, false, false, 0);
   1475 
   1476   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(),
   1477                      false, false, false, 0);
   1478 }
   1479 
   1480 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
   1481                                                   SelectionDAG &DAG) const {
   1482   return Op.getOperand(0);
   1483 }
   1484 
   1485 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
   1486                                                 SelectionDAG &DAG) const {
   1487   SDValue Chain = Op.getOperand(0);
   1488   SDValue Trmp = Op.getOperand(1); // trampoline
   1489   SDValue FPtr = Op.getOperand(2); // nested function
   1490   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
   1491   DebugLoc dl = Op.getDebugLoc();
   1492 
   1493   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   1494   bool isPPC64 = (PtrVT == MVT::i64);
   1495   Type *IntPtrTy =
   1496     DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType(
   1497                                                              *DAG.getContext());
   1498 
   1499   TargetLowering::ArgListTy Args;
   1500   TargetLowering::ArgListEntry Entry;
   1501 
   1502   Entry.Ty = IntPtrTy;
   1503   Entry.Node = Trmp; Args.push_back(Entry);
   1504 
   1505   // TrampSize == (isPPC64 ? 48 : 40);
   1506   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40,
   1507                                isPPC64 ? MVT::i64 : MVT::i32);
   1508   Args.push_back(Entry);
   1509 
   1510   Entry.Node = FPtr; Args.push_back(Entry);
   1511   Entry.Node = Nest; Args.push_back(Entry);
   1512 
   1513   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
   1514   TargetLowering::CallLoweringInfo CLI(Chain,
   1515                                        Type::getVoidTy(*DAG.getContext()),
   1516                                        false, false, false, false, 0,
   1517                                        CallingConv::C,
   1518                 /*isTailCall=*/false,
   1519                                        /*doesNotRet=*/false,
   1520                                        /*isReturnValueUsed=*/true,
   1521                 DAG.getExternalSymbol("__trampoline_setup", PtrVT),
   1522                 Args, DAG, dl);
   1523   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
   1524 
   1525   return CallResult.second;
   1526 }
   1527 
   1528 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG,
   1529                                         const PPCSubtarget &Subtarget) const {
   1530   MachineFunction &MF = DAG.getMachineFunction();
   1531   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
   1532 
   1533   DebugLoc dl = Op.getDebugLoc();
   1534 
   1535   if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
   1536     // vastart just stores the address of the VarArgsFrameIndex slot into the
   1537     // memory location argument.
   1538     EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   1539     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
   1540     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
   1541     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
   1542                         MachinePointerInfo(SV),
   1543                         false, false, 0);
   1544   }
   1545 
   1546   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
   1547   // We assume the given va_list is already allocated.
   1548   //
   1549   // typedef struct {
   1550   //  char gpr;     /* index into the array of 8 GPRs
   1551   //                 * stored in the register save area
   1552   //                 * gpr=0 corresponds to r3,
   1553   //                 * gpr=1 to r4, etc.
   1554   //                 */
   1555   //  char fpr;     /* index into the array of 8 FPRs
   1556   //                 * stored in the register save area
   1557   //                 * fpr=0 corresponds to f1,
   1558   //                 * fpr=1 to f2, etc.
   1559   //                 */
   1560   //  char *overflow_arg_area;
   1561   //                /* location on stack that holds
   1562   //                 * the next overflow argument
   1563   //                 */
   1564   //  char *reg_save_area;
   1565   //               /* where r3:r10 and f1:f8 (if saved)
   1566   //                * are stored
   1567   //                */
   1568   // } va_list[1];
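          // With 4-byte pointers this is the byte layout the stores below assume:
          // offset 0 = gpr, offset 1 = fpr, offset 4 = overflow_arg_area, and
          // offset 8 = reg_save_area (a 12-byte va_list).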
   1569 
   1570 
   1571   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), MVT::i32);
   1572   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), MVT::i32);
   1573 
   1574 
   1575   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   1576 
   1577   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
   1578                                             PtrVT);
   1579   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
   1580                                  PtrVT);
   1581 
   1582   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
   1583   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT);
   1584 
   1585   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
   1586   SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT);
   1587 
   1588   uint64_t FPROffset = 1;
   1589   SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT);
   1590 
   1591   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
   1592 
   1593   // Store first byte: number of int regs
   1594   SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR,
   1595                                          Op.getOperand(1),
   1596                                          MachinePointerInfo(SV),
   1597                                          MVT::i8, false, false, 0);
   1598   uint64_t nextOffset = FPROffset;
   1599   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
   1600                                   ConstFPROffset);
   1601 
   1602   // Store second byte: number of float regs
   1603   SDValue secondStore =
   1604     DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
   1605                       MachinePointerInfo(SV, nextOffset), MVT::i8,
   1606                       false, false, 0);
   1607   nextOffset += StackOffset;
   1608   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
   1609 
   1610   // Store second word: arguments given on stack
   1611   SDValue thirdStore =
   1612     DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
   1613                  MachinePointerInfo(SV, nextOffset),
   1614                  false, false, 0);
   1615   nextOffset += FrameOffset;
   1616   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
   1617 
   1618   // Store third word: arguments given in registers
   1619   return DAG.getStore(thirdStore, dl, FR, nextPtr,
   1620                       MachinePointerInfo(SV, nextOffset),
   1621                       false, false, 0);
   1622 
   1623 }
   1624 
   1625 #include "PPCGenCallingConv.inc"
   1626 
   1627 static bool CC_PPC_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
   1628                                      CCValAssign::LocInfo &LocInfo,
   1629                                      ISD::ArgFlagsTy &ArgFlags,
   1630                                      CCState &State) {
   1631   return true;
   1632 }
   1633 
   1634 static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
   1635                                             MVT &LocVT,
   1636                                             CCValAssign::LocInfo &LocInfo,
   1637                                             ISD::ArgFlagsTy &ArgFlags,
   1638                                             CCState &State) {
   1639   static const uint16_t ArgRegs[] = {
   1640     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
   1641     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
   1642   };
   1643   const unsigned NumArgRegs = array_lengthof(ArgRegs);
   1644 
   1645   unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
   1646 
   1647   // Skip one register if the first unallocated register has an even register
   1648   // number and there are still argument registers available which have not been
   1649   // allocated yet. RegNum is actually an index into ArgRegs, which means we
   1650   // need to skip a register if RegNum is odd.
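          // E.g. (illustrative): after an i32 lands in R3, the first unallocated
          // index is 1 (R4); an incoming i64 must start at an even index, so R4 is
          // burned here and the i64 is then assigned the R5/R6 pair.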
   1651   if (RegNum != NumArgRegs && RegNum % 2 == 1) {
   1652     State.AllocateReg(ArgRegs[RegNum]);
   1653   }
   1654 
   1655   // Always return false here, as this function only makes sure that the first
   1656   // unallocated register has an odd register number and does not actually
   1657   // allocate a register for the current argument.
   1658   return false;
   1659 }
   1660 
   1661 static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
   1662                                               MVT &LocVT,
   1663                                               CCValAssign::LocInfo &LocInfo,
   1664                                               ISD::ArgFlagsTy &ArgFlags,
   1665                                               CCState &State) {
   1666   static const uint16_t ArgRegs[] = {
   1667     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
   1668     PPC::F8
   1669   };
   1670 
   1671   const unsigned NumArgRegs = array_lengthof(ArgRegs);
   1672 
   1673   unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
   1674 
   1675   // If there is only one floating-point register left, we need to put both
   1676   // f64 values of a split ppc_fp128 value on the stack.
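          // E.g. (illustrative): with F1-F7 already taken, a split ppc_fp128 would
          // get F8 for its first f64 and the stack for its second; burning F8 here
          // sends both halves to the stack together instead.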
   1677   if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
   1678     State.AllocateReg(ArgRegs[RegNum]);
   1679   }
   1680 
   1681   // Always return false here, as this function only makes sure that the two f64
   1682   // values a ppc_fp128 value is split into are both passed in registers or both
   1683   // passed on the stack and does not actually allocate a register for the
   1684   // current argument.
   1685   return false;
   1686 }
   1687 
   1688 /// GetFPR - Get the set of FP registers that should be allocated for
   1689 /// arguments on Darwin.
   1690 static const uint16_t *GetFPR() {
   1691   static const uint16_t FPR[] = {
   1692     PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
   1693     PPC::F8, PPC::F9, PPC::F10, PPC::F11, PPC::F12, PPC::F13
   1694   };
   1695 
   1696   return FPR;
   1697 }
   1698 
   1699 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
   1700 /// the stack.
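        /// For example, with PtrByteSize == 4 an f64 reserves 8 bytes, and a
        /// 10-byte byval aggregate is rounded up to 12 (a worked illustration).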
   1701 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
   1702                                        unsigned PtrByteSize) {
   1703   unsigned ArgSize = ArgVT.getSizeInBits()/8;
   1704   if (Flags.isByVal())
   1705     ArgSize = Flags.getByValSize();
   1706   ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
   1707 
   1708   return ArgSize;
   1709 }
   1710 
   1711 SDValue
   1712 PPCTargetLowering::LowerFormalArguments(SDValue Chain,
   1713                                         CallingConv::ID CallConv, bool isVarArg,
   1714                                         const SmallVectorImpl<ISD::InputArg>
   1715                                           &Ins,
   1716                                         DebugLoc dl, SelectionDAG &DAG,
   1717                                         SmallVectorImpl<SDValue> &InVals)
   1718                                           const {
   1719   if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) {
   1720     return LowerFormalArguments_SVR4(Chain, CallConv, isVarArg, Ins,
   1721                                      dl, DAG, InVals);
   1722   } else {
   1723     return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
   1724                                        dl, DAG, InVals);
   1725   }
   1726 }
   1727 
   1728 SDValue
   1729 PPCTargetLowering::LowerFormalArguments_SVR4(
   1730                                       SDValue Chain,
   1731                                       CallingConv::ID CallConv, bool isVarArg,
   1732                                       const SmallVectorImpl<ISD::InputArg>
   1733                                         &Ins,
   1734                                       DebugLoc dl, SelectionDAG &DAG,
   1735                                       SmallVectorImpl<SDValue> &InVals) const {
   1736 
   1737   // 32-bit SVR4 ABI Stack Frame Layout:
   1738   //              +-----------------------------------+
   1739   //        +-->  |            Back chain             |
   1740   //        |     +-----------------------------------+
   1741   //        |     | Floating-point register save area |
   1742   //        |     +-----------------------------------+
   1743   //        |     |    General register save area     |
   1744   //        |     +-----------------------------------+
   1745   //        |     |          CR save word             |
   1746   //        |     +-----------------------------------+
   1747   //        |     |         VRSAVE save word          |
   1748   //        |     +-----------------------------------+
   1749   //        |     |         Alignment padding         |
   1750   //        |     +-----------------------------------+
   1751   //        |     |     Vector register save area     |
   1752   //        |     +-----------------------------------+
   1753   //        |     |       Local variable space        |
   1754   //        |     +-----------------------------------+
   1755   //        |     |        Parameter list area        |
   1756   //        |     +-----------------------------------+
   1757   //        |     |           LR save word            |
   1758   //        |     +-----------------------------------+
   1759   // SP-->  +---  |            Back chain             |
   1760   //              +-----------------------------------+
   1761   //
   1762   // Specifications:
   1763   //   System V Application Binary Interface PowerPC Processor Supplement
   1764   //   AltiVec Technology Programming Interface Manual
   1765 
   1766   MachineFunction &MF = DAG.getMachineFunction();
   1767   MachineFrameInfo *MFI = MF.getFrameInfo();
   1768   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
   1769 
   1770   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   1771   // Potential tail calls could cause overwriting of argument stack slots.
   1772   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
   1773                        (CallConv == CallingConv::Fast));
   1774   unsigned PtrByteSize = 4;
   1775 
   1776   // Assign locations to all of the incoming arguments.
   1777   SmallVector<CCValAssign, 16> ArgLocs;
   1778   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
   1779                  getTargetMachine(), ArgLocs, *DAG.getContext());
   1780 
   1781   // Reserve space for the linkage area on the stack.
   1782   CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
   1783 
   1784   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4);
   1785 
   1786   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
   1787     CCValAssign &VA = ArgLocs[i];
   1788 
   1789     // Arguments stored in registers.
   1790     if (VA.isRegLoc()) {
   1791       const TargetRegisterClass *RC;
   1792       EVT ValVT = VA.getValVT();
   1793 
   1794       switch (ValVT.getSimpleVT().SimpleTy) {
   1795         default:
   1796           llvm_unreachable("ValVT not supported by formal arguments Lowering");
   1797         case MVT::i32:
   1798           RC = &PPC::GPRCRegClass;
   1799           break;
   1800         case MVT::f32:
   1801           RC = &PPC::F4RCRegClass;
   1802           break;
   1803         case MVT::f64:
   1804           RC = &PPC::F8RCRegClass;
   1805           break;
   1806         case MVT::v16i8:
   1807         case MVT::v8i16:
   1808         case MVT::v4i32:
   1809         case MVT::v4f32:
   1810           RC = &PPC::VRRCRegClass;
   1811           break;
   1812       }
   1813 
   1814       // Transform the arguments stored in physical registers into virtual ones.
   1815       unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
   1816       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT);
   1817 
   1818       InVals.push_back(ArgValue);
   1819     } else {
   1820       // Argument stored in memory.
   1821       assert(VA.isMemLoc());
   1822 
   1823       unsigned ArgSize = VA.getLocVT().getSizeInBits() / 8;
   1824       int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(),
   1825                                       isImmutable);
   1826 
   1827       // Create load nodes to retrieve arguments from the stack.
   1828       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
   1829       InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
   1830                                    MachinePointerInfo(),
   1831                                    false, false, false, 0));
   1832     }
   1833   }
   1834 
   1835   // Assign locations to all of the incoming aggregate by value arguments.
   1836   // Aggregates passed by value are stored in the local variable space of the
   1837   // caller's stack frame, right above the parameter list area.
   1838   SmallVector<CCValAssign, 16> ByValArgLocs;
   1839   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
   1840                       getTargetMachine(), ByValArgLocs, *DAG.getContext());
   1841 
   1842   // Reserve stack space for the allocations in CCInfo.
   1843   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
   1844 
   1845   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4_ByVal);
   1846 
   1847   // Area that is at least reserved in the caller of this function.
   1848   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
   1849 
   1850   // Set the size that is at least reserved in caller of this function.  Tail
   1851   // call optimized function's reserved stack space needs to be aligned so that
   1852   // taking the difference between two stack areas will result in an aligned
   1853   // stack.
   1854   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
   1855 
   1856   MinReservedArea =
   1857     std::max(MinReservedArea,
   1858              PPCFrameLowering::getMinCallFrameSize(false, false));
   1859 
   1860   unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()->
   1861     getStackAlignment();
   1862   unsigned AlignMask = TargetAlign-1;
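          // E.g., with a 16-byte stack alignment (illustrative), AlignMask is 15
          // and a 52-byte area rounds up to (52 + 15) & ~15 == 64.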
   1863   MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
   1864 
   1865   FI->setMinReservedArea(MinReservedArea);
   1866 
   1867   SmallVector<SDValue, 8> MemOps;
   1868 
   1869   // If the function takes a variable number of arguments, make a frame index
   1870   // for the start of the first vararg value... for expansion of llvm.va_start.
   1871   if (isVarArg) {
   1872     static const uint16_t GPArgRegs[] = {
   1873       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
   1874       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
   1875     };
   1876     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
   1877 
   1878     static const uint16_t FPArgRegs[] = {
   1879       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
   1880       PPC::F8
   1881     };
   1882     const unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
   1883 
   1884     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs,
   1885                                                           NumGPArgRegs));
   1886     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs,
   1887                                                           NumFPArgRegs));
   1888 
   1889     // Make room for NumGPArgRegs and NumFPArgRegs.
   1890     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
   1891                 NumFPArgRegs * EVT(MVT::f64).getSizeInBits()/8;
   1892 
   1893     FuncInfo->setVarArgsStackOffset(
   1894       MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
   1895                              CCInfo.getNextStackOffset(), true));
   1896 
   1897     FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false));
   1898     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
   1899 
   1900     // The fixed integer arguments of a variadic function are stored to the
   1901     // VarArgsFrameIndex on the stack so that they may be loaded by dereferencing
   1902     // the result of va_next.
   1903     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
   1904       // Get an existing live-in vreg, or add a new one.
   1905       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
   1906       if (!VReg)
   1907         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
   1908 
   1909       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
   1910       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
   1911                                    MachinePointerInfo(), false, false, 0);
   1912       MemOps.push_back(Store);
   1913       // Increment the address by four for the next argument to store
   1914       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
   1915       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
   1916     }
   1917 
   1918     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
   1919     // is set.
   1920     // The double arguments are stored to the VarArgsFrameIndex
   1921     // on the stack.
   1922     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
   1923       // Get an existing live-in vreg, or add a new one.
   1924       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
   1925       if (!VReg)
   1926         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
   1927 
   1928       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
   1929       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
   1930                                    MachinePointerInfo(), false, false, 0);
   1931       MemOps.push_back(Store);
   1932       // Increment the address by eight for the next argument to store
   1933       SDValue PtrOff = DAG.getConstant(EVT(MVT::f64).getSizeInBits()/8,
   1934                                          PtrVT);
   1935       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
   1936     }
   1937   }
   1938 
   1939   if (!MemOps.empty())
   1940     Chain = DAG.getNode(ISD::TokenFactor, dl,
   1941                         MVT::Other, &MemOps[0], MemOps.size());
   1942 
   1943   return Chain;
   1944 }
   1945 
   1946 SDValue
   1947 PPCTargetLowering::LowerFormalArguments_Darwin(
   1948                                       SDValue Chain,
   1949                                       CallingConv::ID CallConv, bool isVarArg,
   1950                                       const SmallVectorImpl<ISD::InputArg>
   1951                                         &Ins,
   1952                                       DebugLoc dl, SelectionDAG &DAG,
   1953                                       SmallVectorImpl<SDValue> &InVals) const {
   1954   // TODO: add description of PPC stack frame format, or at least some docs.
   1955   //
   1956   MachineFunction &MF = DAG.getMachineFunction();
   1957   MachineFrameInfo *MFI = MF.getFrameInfo();
   1958   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
   1959 
   1960   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   1961   bool isPPC64 = PtrVT == MVT::i64;
   1962   // Potential tail calls could cause overwriting of argument stack slots.
   1963   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
   1964                        (CallConv == CallingConv::Fast));
   1965   unsigned PtrByteSize = isPPC64 ? 8 : 4;
   1966 
   1967   unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true);
   1968   // Area that is at least reserved in caller of this function.
   1969   unsigned MinReservedArea = ArgOffset;
   1970 
   1971   static const uint16_t GPR_32[] = {           // 32-bit registers.
   1972     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
   1973     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
   1974   };
   1975   static const uint16_t GPR_64[] = {           // 64-bit registers.
   1976     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
   1977     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
   1978   };
   1979 
   1980   static const uint16_t *FPR = GetFPR();
   1981 
   1982   static const uint16_t VR[] = {
   1983     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
   1984     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
   1985   };
   1986 
   1987   const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
   1988   const unsigned Num_FPR_Regs = 13;
   1989   const unsigned Num_VR_Regs  = array_lengthof(VR);
   1990 
   1991   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
   1992 
   1993   const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32;
   1994 
   1995   // In 32-bit non-varargs functions, the stack space for vectors is after the
   1996   // stack space for non-vectors.  We do not use this space unless we have
   1997   // too many vectors to fit in registers, something that only occurs in
   1998   // constructed examples:), but we have to walk the arglist to figure
   1999   // that out...for the pathological case, compute VecArgOffset as the
   2000   // start of the vector parameter area.  Computing VecArgOffset is the
   2001   // entire point of the following loop.
   2002   unsigned VecArgOffset = ArgOffset;
   2003   if (!isVarArg && !isPPC64) {
   2004     for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
   2005          ++ArgNo) {
   2006       EVT ObjectVT = Ins[ArgNo].VT;
   2007       ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
   2008 
   2009       if (Flags.isByVal()) {
   2010         // ObjSize is the true size; ArgSize is that rounded to a multiple of regs.
   2011         unsigned ObjSize = Flags.getByValSize();
   2012         unsigned ArgSize =
   2013                 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
   2014         VecArgOffset += ArgSize;
   2015         continue;
   2016       }
   2017 
   2018       switch(ObjectVT.getSimpleVT().SimpleTy) {
   2019       default: llvm_unreachable("Unhandled argument type!");
   2020       case MVT::i32:
   2021       case MVT::f32:
   2022         VecArgOffset += isPPC64 ? 8 : 4;
   2023         break;
   2024       case MVT::i64:  // PPC64
   2025       case MVT::f64:
   2026         VecArgOffset += 8;
   2027         break;
   2028       case MVT::v4f32:
   2029       case MVT::v4i32:
   2030       case MVT::v8i16:
   2031       case MVT::v16i8:
   2032         // Nothing to do, we're only looking at Nonvector args here.
   2033         break;
   2034       }
   2035     }
   2036   }
   2037   // We've found where the vector parameter area in memory is.  Skip the
   2038   // first 12 parameters; these don't use that memory.
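          // E.g. (illustrative): if the nonvector parameters end at byte 40, a
          // 13th vector parameter would land at align16(40) + 12*16 = 48 + 192 =
          // 240 bytes into the parameter area.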
   2039   VecArgOffset = ((VecArgOffset+15)/16)*16;
   2040   VecArgOffset += 12*16;
   2041 
   2042   // Add DAG nodes to load the arguments or copy them out of registers.  On
   2043   // entry to a function on PPC, the arguments start after the linkage area,
   2044   // although the first ones are often in registers.
   2045 
   2046   SmallVector<SDValue, 8> MemOps;
   2047   unsigned nAltivecParamsAtEnd = 0;
   2048   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
   2049     SDValue ArgVal;
   2050     bool needsLoad = false;
   2051     EVT ObjectVT = Ins[ArgNo].VT;
   2052     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
   2053     unsigned ArgSize = ObjSize;
   2054     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
   2055 
   2056     unsigned CurArgOffset = ArgOffset;
   2057 
   2058     // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
   2059     if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
   2060         ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
   2061       if (isVarArg || isPPC64) {
   2062         MinReservedArea = ((MinReservedArea+15)/16)*16;
   2063         MinReservedArea += CalculateStackSlotSize(ObjectVT,
   2064                                                   Flags,
   2065                                                   PtrByteSize);
   2066       } else  nAltivecParamsAtEnd++;
   2067     } else
   2068       // Calculate min reserved area.
   2069       MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
   2070                                                 Flags,
   2071                                                 PtrByteSize);
   2072 
   2073     // FIXME the codegen can be much improved in some cases.
   2074     // We do not have to keep everything in memory.
   2075     if (Flags.isByVal()) {
   2076       // ObjSize is the true size; ArgSize is that rounded to a multiple of regs.
   2077       ObjSize = Flags.getByValSize();
   2078       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
   2079       // Objects of size 1 and 2 are right justified, everything else is
   2080       // left justified.  This means the memory address is adjusted forwards.
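              // E.g., a 1-byte byval in a 4-byte slot sits at slot_base + 3, so the
              // address handed out points at the byte itself (a sketch).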
   2081       if (ObjSize==1 || ObjSize==2) {
   2082         CurArgOffset = CurArgOffset + (4 - ObjSize);
   2083       }
   2084       // The value of the object is its address.
   2085       int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true);
   2086       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
   2087       InVals.push_back(FIN);
   2088       if (ObjSize==1 || ObjSize==2) {
   2089         if (GPR_idx != Num_GPR_Regs) {
   2090           unsigned VReg;
   2091           if (isPPC64)
   2092             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
   2093           else
   2094             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
   2095           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
   2096           SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
   2097                                             MachinePointerInfo(),
   2098                                             ObjSize==1 ? MVT::i8 : MVT::i16,
   2099                                             false, false, 0);
   2100           MemOps.push_back(Store);
   2101           ++GPR_idx;
   2102         }
   2103 
   2104         ArgOffset += PtrByteSize;
   2105 
   2106         continue;
   2107       }
   2108       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
   2109         // Store whatever pieces of the object are in registers
   2110         // to memory.  ArgVal will be address of the beginning of
   2111         // the object.
   2112         if (GPR_idx != Num_GPR_Regs) {
   2113           unsigned VReg;
   2114           if (isPPC64)
   2115             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
   2116           else
   2117             VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
   2118           int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true);
   2119           SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
   2120           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
   2121           SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
   2122                                        MachinePointerInfo(),
   2123                                        false, false, 0);
   2124           MemOps.push_back(Store);
   2125           ++GPR_idx;
   2126           ArgOffset += PtrByteSize;
   2127         } else {
   2128           ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
   2129           break;
   2130         }
   2131       }
   2132       continue;
   2133     }
   2134 
   2135     switch (ObjectVT.getSimpleVT().SimpleTy) {
   2136     default: llvm_unreachable("Unhandled argument type!");
   2137     case MVT::i32:
   2138       if (!isPPC64) {
   2139         if (GPR_idx != Num_GPR_Regs) {
   2140           unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
   2141           ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
   2142           ++GPR_idx;
   2143         } else {
   2144           needsLoad = true;
   2145           ArgSize = PtrByteSize;
   2146         }
   2147         // All int arguments reserve stack space in the Darwin ABI.
   2148         ArgOffset += PtrByteSize;
   2149         break;
   2150       }
   2151       // FALLTHROUGH
   2152     case MVT::i64:  // PPC64
   2153       if (GPR_idx != Num_GPR_Regs) {
   2154         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
   2155         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
   2156 
   2157         if (ObjectVT == MVT::i32) {
   2158           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
   2159           // value to MVT::i64 and then truncate to the correct register size.
   2160           if (Flags.isSExt())
   2161             ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
   2162                                  DAG.getValueType(ObjectVT));
   2163           else if (Flags.isZExt())
   2164             ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
   2165                                  DAG.getValueType(ObjectVT));
   2166 
   2167           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
   2168         }
   2169 
   2170         ++GPR_idx;
   2171       } else {
   2172         needsLoad = true;
   2173         ArgSize = PtrByteSize;
   2174       }
   2175       // All int arguments reserve stack space in the Darwin ABI.
   2176       ArgOffset += 8;
   2177       break;
   2178 
   2179     case MVT::f32:
   2180     case MVT::f64:
   2181       // Every 4 bytes of argument space consumes one of the GPRs available for
   2182       // argument passing.
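              // E.g., on 32-bit Darwin an f64 shadows two GPRs (say r5 and r6), so
              // the next integer argument would start at r7 (illustrative).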
   2183       if (GPR_idx != Num_GPR_Regs) {
   2184         ++GPR_idx;
   2185         if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
   2186           ++GPR_idx;
   2187       }
   2188       if (FPR_idx != Num_FPR_Regs) {
   2189         unsigned VReg;
   2190 
   2191         if (ObjectVT == MVT::f32)
   2192           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
   2193         else
   2194           VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);
   2195 
   2196         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
   2197         ++FPR_idx;
   2198       } else {
   2199         needsLoad = true;
   2200       }
   2201 
   2202       // All FP arguments reserve stack space in the Darwin ABI.
   2203       ArgOffset += isPPC64 ? 8 : ObjSize;
   2204       break;
   2205     case MVT::v4f32:
   2206     case MVT::v4i32:
   2207     case MVT::v8i16:
   2208     case MVT::v16i8:
   2209       // Note that vector arguments in registers don't reserve stack space,
   2210       // except in varargs functions.
   2211       if (VR_idx != Num_VR_Regs) {
   2212         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
   2213         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
   2214         if (isVarArg) {
   2215           while ((ArgOffset % 16) != 0) {
   2216             ArgOffset += PtrByteSize;
   2217             if (GPR_idx != Num_GPR_Regs)
   2218               GPR_idx++;
   2219           }
   2220           ArgOffset += 16;
   2221           GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
   2222         }
   2223         ++VR_idx;
   2224       } else {
   2225         if (!isVarArg && !isPPC64) {
   2226           // Vectors go after all the nonvectors.
   2227           CurArgOffset = VecArgOffset;
   2228           VecArgOffset += 16;
   2229         } else {
   2230           // Vectors are aligned.
   2231           ArgOffset = ((ArgOffset+15)/16)*16;
   2232           CurArgOffset = ArgOffset;
   2233           ArgOffset += 16;
   2234         }
   2235         needsLoad = true;
   2236       }
   2237       break;
   2238     }
   2239 
   2240     // We need to load the argument to a virtual register if we determined above
   2241     // that we ran out of physical registers of the appropriate type.
   2242     if (needsLoad) {
   2243       int FI = MFI->CreateFixedObject(ObjSize,
   2244                                       CurArgOffset + (ArgSize - ObjSize),
   2245                                       isImmutable);
   2246       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
   2247       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(),
   2248                            false, false, false, 0);
   2249     }
   2250 
   2251     InVals.push_back(ArgVal);
   2252   }
   2253 
   2254   // Set the size that is at least reserved in caller of this function.  Tail
   2255   // call optimized function's reserved stack space needs to be aligned so that
   2256   // taking the difference between two stack areas will result in an aligned
   2257   // stack.
   2258   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
   2259   // Add the Altivec parameters at the end, if needed.
   2260   if (nAltivecParamsAtEnd) {
   2261     MinReservedArea = ((MinReservedArea+15)/16)*16;
   2262     MinReservedArea += 16*nAltivecParamsAtEnd;
   2263   }
   2264   MinReservedArea =
   2265     std::max(MinReservedArea,
   2266              PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
   2267   unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()->
   2268     getStackAlignment();
   2269   unsigned AlignMask = TargetAlign-1;
   2270   MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
   2271   FI->setMinReservedArea(MinReservedArea);
   2272 
   2273   // If the function takes a variable number of arguments, make a frame index
   2274   // for the start of the first vararg value... for expansion of llvm.va_start.
   2275   if (isVarArg) {
   2276     int Depth = ArgOffset;
   2277 
   2278     FuncInfo->setVarArgsFrameIndex(
   2279       MFI->CreateFixedObject(PtrVT.getSizeInBits()/8,
   2280                              Depth, true));
   2281     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
   2282 
   2283     // If this function is vararg, store any remaining integer argument regs
   2284     // to their spots on the stack so that they may be loaded by dereferencing the
   2285     // result of va_next.
   2286     for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
   2287       unsigned VReg;
   2288 
   2289       if (isPPC64)
   2290         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
   2291       else
   2292         VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
   2293 
   2294       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
   2295       SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
   2296                                    MachinePointerInfo(), false, false, 0);
   2297       MemOps.push_back(Store);
   2298       // Increment the address by four for the next argument to store
   2299       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT);
   2300       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
   2301     }
   2302   }
   2303 
   2304   if (!MemOps.empty())
   2305     Chain = DAG.getNode(ISD::TokenFactor, dl,
   2306                         MVT::Other, &MemOps[0], MemOps.size());
   2307 
   2308   return Chain;
   2309 }
   2310 
   2311 /// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus
   2312 /// linkage area for the Darwin ABI.
   2313 static unsigned
   2314 CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG,
   2315                                      bool isPPC64,
   2316                                      bool isVarArg,
   2317                                      unsigned CC,
   2318                                      const SmallVectorImpl<ISD::OutputArg>
   2319                                        &Outs,
   2320                                      const SmallVectorImpl<SDValue> &OutVals,
   2321                                      unsigned &nAltivecParamsAtEnd) {
   2322   // Count how many bytes are to be pushed on the stack, including the linkage
   2323   // area, and parameter passing area.  We start with 24/48 bytes, which is
   2324   // prereserved space for [SP][CR][LR][3 x unused].
   2325   unsigned NumBytes = PPCFrameLowering::getLinkageSize(isPPC64, true);
   2326   unsigned NumOps = Outs.size();
   2327   unsigned PtrByteSize = isPPC64 ? 8 : 4;
   2328 
   2329   // Add up all the space actually used.
   2330   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
   2331   // they all go in registers, but we must reserve stack space for them for
   2332   // possible use by the caller.  In varargs or 64-bit calls, parameters are
   2333   // assigned stack space in order, with padding so Altivec parameters are
   2334   // 16-byte aligned.
   2335   nAltivecParamsAtEnd = 0;
   2336   for (unsigned i = 0; i != NumOps; ++i) {
   2337     ISD::ArgFlagsTy Flags = Outs[i].Flags;
   2338     EVT ArgVT = Outs[i].VT;
   2339     // Varargs Altivec parameters are padded to a 16 byte boundary.
   2340     if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 ||
   2341         ArgVT==MVT::v8i16 || ArgVT==MVT::v16i8) {
   2342       if (!isVarArg && !isPPC64) {
   2343         // Non-varargs Altivec parameters go after all the non-Altivec
   2344         // parameters; handle those later so we know how much padding we need.
   2345         nAltivecParamsAtEnd++;
   2346         continue;
   2347       }
   2348       // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
   2349       NumBytes = ((NumBytes+15)/16)*16;
   2350     }
   2351     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
   2352   }
   2353 
   2354   // Allow for Altivec parameters at the end, if needed.
   2355   if (nAltivecParamsAtEnd) {
   2356     NumBytes = ((NumBytes+15)/16)*16;
   2357     NumBytes += 16*nAltivecParamsAtEnd;
   2358   }
   2359 
   2360   // The prolog code of the callee may store up to 8 GPR argument registers to
   2361   // the stack, allowing va_start to index over them in memory if it's varargs.
   2362   // Because we cannot tell if this is needed on the caller side, we have to
   2363   // conservatively assume that it is needed.  As such, make sure we have at
   2364   // least enough stack space for the caller to store the 8 GPRs.
   2365   NumBytes = std::max(NumBytes,
   2366                       PPCFrameLowering::getMinCallFrameSize(isPPC64, true));
   2367 
   2368   // Tail call needs the stack to be aligned.
   2369   if (CC == CallingConv::Fast && DAG.getTarget().Options.GuaranteedTailCallOpt){
   2370     unsigned TargetAlign = DAG.getMachineFunction().getTarget().
   2371       getFrameLowering()->getStackAlignment();
   2372     unsigned AlignMask = TargetAlign-1;
   2373     NumBytes = (NumBytes + AlignMask) & ~AlignMask;
   2374   }
   2375 
   2376   return NumBytes;
   2377 }
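        // A rough worked example (illustrative; the exact constants live in
        // PPCFrameLowering): on 64-bit Darwin, three i64 arguments give
        // NumBytes = 48 (linkage) + 3*8 = 72, which getMinCallFrameSize then
        // raises to cover the 8 GPR save slots (48 + 8*8 = 112 bytes).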
   2378 
   2379 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
   2380 /// adjusted to accommodate the arguments for the tailcall.
   2381 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
   2382                                    unsigned ParamSize) {
   2383 
   2384   if (!isTailCall) return 0;
   2385 
   2386   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
   2387   unsigned CallerMinReservedArea = FI->getMinReservedArea();
   2388   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
   2389   // Remember only if the new adjustment is bigger.
   2390   if (SPDiff < FI->getTailCallSPDelta())
   2391     FI->setTailCallSPDelta(SPDiff);
   2392 
   2393   return SPDiff;
   2394 }
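        // Illustrative example (values not from this file): if the caller
        // reserved 112 bytes and the tail callee needs only 64 bytes of
        // parameter space, SPDiff = 112 - 64 = 48; the most negative SPDiff
        // seen so far is what gets remembered via setTailCallSPDelta.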
   2395 
   2396 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
   2397 /// for tail call optimization. Targets which want to do tail call
   2398 /// optimization should implement this function.
   2399 bool
   2400 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
   2401                                                      CallingConv::ID CalleeCC,
   2402                                                      bool isVarArg,
   2403                                       const SmallVectorImpl<ISD::InputArg> &Ins,
   2404                                                      SelectionDAG& DAG) const {
   2405   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
   2406     return false;
   2407 
   2408   // Variable argument functions are not supported.
   2409   if (isVarArg)
   2410     return false;
   2411 
   2412   MachineFunction &MF = DAG.getMachineFunction();
   2413   CallingConv::ID CallerCC = MF.getFunction()->getCallingConv();
   2414   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
   2415     // Functions containing by val parameters are not supported.
   2416     for (unsigned i = 0; i != Ins.size(); i++) {
   2417        ISD::ArgFlagsTy Flags = Ins[i].Flags;
   2418        if (Flags.isByVal()) return false;
   2419     }
   2420 
   2421     // Non-PIC/GOT tail calls are supported.
   2422     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
   2423       return true;
   2424 
   2425     // At the moment we can only do local tail calls (in the same module,
   2426     // hidden or protected) if we are generating PIC.
   2427     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
   2428       return G->getGlobal()->hasHiddenVisibility()
   2429           || G->getGlobal()->hasProtectedVisibility();
   2430   }
   2431 
   2432   return false;
   2433 }
   2434 
   2435 /// isBLACompatibleAddress - Return the immediate to use if the specified
   2436 /// 32-bit value is representable in the immediate field of a BxA instruction.
   2437 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
   2438   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
   2439   if (!C) return 0;
   2440 
   2441   int Addr = C->getZExtValue();
   2442   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
   2443       SignExtend32<26>(Addr) != Addr)
   2444     return 0;  // Top 6 bits have to be sext of immediate.
   2445 
   2446   return DAG.getConstant((int)C->getZExtValue() >> 2,
   2447                          DAG.getTargetLoweringInfo().getPointerTy()).getNode();
   2448 }
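        // Illustrative example (not from the original source): address 0x4000
        // has its low 2 bits clear and survives SignExtend32<26>, so it is
        // encodable and the returned node holds 0x4000 >> 2 == 0x1000; an
        // address like 0x04000000 is rejected because it exceeds the signed
        // 26-bit range.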
   2449 
   2450 namespace {
   2451 
   2452 struct TailCallArgumentInfo {
   2453   SDValue Arg;
   2454   SDValue FrameIdxOp;
   2455   int       FrameIdx;
   2456 
   2457   TailCallArgumentInfo() : FrameIdx(0) {}
   2458 };
   2459 
   2460 }
   2461 
   2462 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slots.
   2463 static void
   2464 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
   2465                    SDValue Chain,
   2466                    const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs,
   2467                    SmallVector<SDValue, 8> &MemOpChains,
   2468                    DebugLoc dl) {
   2469   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
   2470     SDValue Arg = TailCallArgs[i].Arg;
   2471     SDValue FIN = TailCallArgs[i].FrameIdxOp;
   2472     int FI = TailCallArgs[i].FrameIdx;
   2473     // Store relative to the frame pointer.
   2474     MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, FIN,
   2475                                        MachinePointerInfo::getFixedStack(FI),
   2476                                        false, false, 0));
   2477   }
   2478 }
   2479 
   2480 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
   2481 /// the appropriate stack slot for the tail call optimized function call.
   2482 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
   2483                                                MachineFunction &MF,
   2484                                                SDValue Chain,
   2485                                                SDValue OldRetAddr,
   2486                                                SDValue OldFP,
   2487                                                int SPDiff,
   2488                                                bool isPPC64,
   2489                                                bool isDarwinABI,
   2490                                                DebugLoc dl) {
   2491   if (SPDiff) {
   2492     // Calculate the new stack slot for the return address.
   2493     int SlotSize = isPPC64 ? 8 : 4;
   2494     int NewRetAddrLoc = SPDiff + PPCFrameLowering::getReturnSaveOffset(isPPC64,
   2495                                                                    isDarwinABI);
   2496     int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize,
   2497                                                           NewRetAddrLoc, true);
   2498     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
   2499     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
   2500     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
   2501                          MachinePointerInfo::getFixedStack(NewRetAddr),
   2502                          false, false, 0);
   2503 
   2504     // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
   2505     // slot as the FP is never overwritten.
   2506     if (isDarwinABI) {
   2507       int NewFPLoc =
   2508         SPDiff + PPCFrameLowering::getFramePointerSaveOffset(isPPC64, isDarwinABI);
   2509       int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc,
   2510                                                           true);
   2511       SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
   2512       Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
   2513                            MachinePointerInfo::getFixedStack(NewFPIdx),
   2514                            false, false, 0);
   2515     }
   2516   }
   2517   return Chain;
   2518 }
   2519 
   2520 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
   2521 /// the position of the argument.
   2522 static void
   2523 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
   2524                          SDValue Arg, int SPDiff, unsigned ArgOffset,
   2525                       SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
   2526   int Offset = ArgOffset + SPDiff;
   2527   uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
   2528   int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
   2529   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
   2530   SDValue FIN = DAG.getFrameIndex(FI, VT);
   2531   TailCallArgumentInfo Info;
   2532   Info.Arg = Arg;
   2533   Info.FrameIdxOp = FIN;
   2534   Info.FrameIdx = FI;
   2535   TailCallArguments.push_back(Info);
   2536 }
   2537 
   2538 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and
   2539 /// return address stack slots. Returns the chain as result and the loaded
   2540 /// values in LROpOut/FPOpOut. Used when tail calling.
   2541 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
   2542                                                         int SPDiff,
   2543                                                         SDValue Chain,
   2544                                                         SDValue &LROpOut,
   2545                                                         SDValue &FPOpOut,
   2546                                                         bool isDarwinABI,
   2547                                                         DebugLoc dl) const {
   2548   if (SPDiff) {
   2549     // Load the LR and FP stack slot for later adjusting.
   2550     EVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
   2551     LROpOut = getReturnAddrFrameIndex(DAG);
   2552     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
   2553                           false, false, false, 0);
   2554     Chain = SDValue(LROpOut.getNode(), 1);
   2555 
   2556     // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
   2557     // slot as the FP is never overwritten.
   2558     if (isDarwinABI) {
   2559       FPOpOut = getFramePointerFrameIndex(DAG);
   2560       FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(),
   2561                             false, false, false, 0);
   2562       Chain = SDValue(FPOpOut.getNode(), 1);
   2563     }
   2564   }
   2565   return Chain;
   2566 }
   2567 
   2568 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
   2569 /// by "Src" to address "Dst". The size and alignment of the copy are taken
   2570 /// from the parameter attributes in "Flags". The copy will be passed as
   2571 /// a byval function parameter.
   2572 /// Sometimes what we are copying is the end of a larger object, the part that
   2573 /// does not fit in registers.
   2574 static SDValue
   2575 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
   2576                           ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
   2577                           DebugLoc dl) {
   2578   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
   2579   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
   2580                        false, false, MachinePointerInfo(0),
   2581                        MachinePointerInfo(0));
   2582 }
   2583 
   2584 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
   2585 /// tail calls.
   2586 static void
   2587 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
   2588                  SDValue Arg, SDValue PtrOff, int SPDiff,
   2589                  unsigned ArgOffset, bool isPPC64, bool isTailCall,
   2590                  bool isVector, SmallVector<SDValue, 8> &MemOpChains,
   2591                  SmallVector<TailCallArgumentInfo, 8> &TailCallArguments,
   2592                  DebugLoc dl) {
   2593   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   2594   if (!isTailCall) {
   2595     if (isVector) {
   2596       SDValue StackPtr;
   2597       if (isPPC64)
   2598         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
   2599       else
   2600         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
   2601       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
   2602                            DAG.getConstant(ArgOffset, PtrVT));
   2603     }
   2604     MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
   2605                                        MachinePointerInfo(), false, false, 0));
   2606   } else // Calculate and remember the argument location.
   2607     CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
   2608                              TailCallArguments);
   2609 }
   2610 
   2611 static
   2612 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
   2613                      DebugLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes,
   2614                      SDValue LROp, SDValue FPOp, bool isDarwinABI,
   2615                      SmallVector<TailCallArgumentInfo, 8> &TailCallArguments) {
   2616   MachineFunction &MF = DAG.getMachineFunction();
   2617 
   2618   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
   2619   // might overwrite each other in case of tail call optimization.
   2620   SmallVector<SDValue, 8> MemOpChains2;
   2621   // Do not flag preceding copytoreg stuff together with the following stuff.
   2622   InFlag = SDValue();
   2623   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
   2624                                     MemOpChains2, dl);
   2625   if (!MemOpChains2.empty())
   2626     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
   2627                         &MemOpChains2[0], MemOpChains2.size());
   2628 
   2629   // Store the return address to the appropriate stack slot.
   2630   Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff,
   2631                                         isPPC64, isDarwinABI, dl);
   2632 
   2633   // Emit callseq_end just before tailcall node.
   2634   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
   2635                              DAG.getIntPtrConstant(0, true), InFlag);
   2636   InFlag = Chain.getValue(1);
   2637 }
   2638 
   2639 static
   2640 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
   2641                      SDValue &Chain, DebugLoc dl, int SPDiff, bool isTailCall,
   2642                      SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
   2643                      SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys,
   2644                      const PPCSubtarget &PPCSubTarget) {
   2645 
   2646   bool isPPC64 = PPCSubTarget.isPPC64();
   2647   bool isSVR4ABI = PPCSubTarget.isSVR4ABI();
   2648 
   2649   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   2650   NodeTys.push_back(MVT::Other);   // Returns a chain
   2651   NodeTys.push_back(MVT::Glue);    // Returns a flag for retval copy to use.
   2652 
   2653   unsigned CallOpc = isSVR4ABI ? PPCISD::CALL_SVR4 : PPCISD::CALL_Darwin;
   2654 
   2655   bool needIndirectCall = true;
   2656   if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) {
   2657     // If this is an absolute destination address, use the munged value.
   2658     Callee = SDValue(Dest, 0);
   2659     needIndirectCall = false;
   2660   }
   2661 
   2662   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
   2663     // XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201
   2664     // Use indirect calls for ALL function calls in JIT mode, since the
   2665     // far-call stubs may be outside relocation limits for a BL instruction.
   2666     if (!DAG.getTarget().getSubtarget<PPCSubtarget>().isJITCodeModel()) {
   2667       unsigned OpFlags = 0;
   2668       if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
   2669           (PPCSubTarget.getTargetTriple().isMacOSX() &&
   2670            PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5)) &&
   2671           (G->getGlobal()->isDeclaration() ||
   2672            G->getGlobal()->isWeakForLinker())) {
   2673         // PC-relative references to external symbols should go through $stub,
   2674         // unless we're building with the leopard linker or later, which
   2675         // automatically synthesizes these stubs.
   2676         OpFlags = PPCII::MO_DARWIN_STUB;
   2677       }
   2678 
   2679       // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
   2680       // every direct call is) turn it into a TargetGlobalAddress /
   2681       // TargetExternalSymbol node so that legalize doesn't hack it.
   2682       Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
   2683                                           Callee.getValueType(),
   2684                                           0, OpFlags);
   2685       needIndirectCall = false;
   2686     }
   2687   }
   2688 
   2689   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
   2690     unsigned char OpFlags = 0;
   2691 
   2692     if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
   2693         (PPCSubTarget.getTargetTriple().isMacOSX() &&
   2694          PPCSubTarget.getTargetTriple().isMacOSXVersionLT(10, 5))) {
   2695       // PC-relative references to external symbols should go through $stub,
   2696       // unless we're building with the leopard linker or later, which
   2697       // automatically synthesizes these stubs.
   2698       OpFlags = PPCII::MO_DARWIN_STUB;
   2699     }
   2700 
   2701     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
   2702                                          OpFlags);
   2703     needIndirectCall = false;
   2704   }
   2705 
   2706   if (needIndirectCall) {
   2707     // Otherwise, this is an indirect call.  We have to use a MTCTR/BCTRL pair
   2708     // to do the call, we can't use PPCISD::CALL.
   2709     SDValue MTCTROps[] = {Chain, Callee, InFlag};
   2710 
   2711     if (isSVR4ABI && isPPC64) {
   2712       // Function pointers in the 64-bit SVR4 ABI do not point to the function
   2713       // entry point, but to the function descriptor (the function entry point
   2714       // address is part of the function descriptor though).
   2715       // The function descriptor is a three doubleword structure with the
   2716       // following fields: function entry point, TOC base address and
   2717       // environment pointer.
   2718       // Thus for a call through a function pointer, the following actions need
   2719       // to be performed:
   2720       //   1. Save the TOC of the caller in the TOC save area of its stack
   2721       //      frame (this is done in LowerCall_Darwin()).
   2722       //   2. Load the address of the function entry point from the function
   2723       //      descriptor.
   2724       //   3. Load the TOC of the callee from the function descriptor into r2.
   2725       //   4. Load the environment pointer from the function descriptor into
   2726       //      r11.
   2727       //   5. Branch to the function entry point address.
   2728       //   6. On return of the callee, the TOC of the caller needs to be
   2729       //      restored (this is done in FinishCall()).
   2730       //
   2731       // All those operations are flagged together to ensure that no other
   2732       // operations can be scheduled in between. E.g. without flagging the
   2733       // operations together, a TOC access in the caller could be scheduled
   2734       // between the load of the callee TOC and the branch to the callee, which
   2735       // results in the TOC access going through the TOC of the callee instead
   2736       // of going through the TOC of the caller, which leads to incorrect code.
   2737 
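              // In rough assembly terms, the sequence sketched above is
              // (illustrative, not the verbatim emitted code; "fn" is the
              // function-pointer value):
              //   ld rT,  0(fn)   ; entry point        (descriptor offset 0)
              //   ld r2,  8(fn)   ; callee TOC base    (descriptor offset 8)
              //   ld r11, 16(fn)  ; environment ptr    (descriptor offset 16)
              //   mtctr rT; bctrl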
   2738       // Load the address of the function entry point from the function
   2739       // descriptor.
   2740       SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Glue);
   2741       SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, MTCTROps,
   2742                                         InFlag.getNode() ? 3 : 2);
   2743       Chain = LoadFuncPtr.getValue(1);
   2744       InFlag = LoadFuncPtr.getValue(2);
   2745 
   2746       // Load environment pointer into r11.
   2747       // Offset of the environment pointer within the function descriptor.
   2748       SDValue PtrOff = DAG.getIntPtrConstant(16);
   2749 
   2750       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff);
   2751       SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr,
   2752                                        InFlag);
   2753       Chain = LoadEnvPtr.getValue(1);
   2754       InFlag = LoadEnvPtr.getValue(2);
   2755 
   2756       SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr,
   2757                                         InFlag);
   2758       Chain = EnvVal.getValue(0);
   2759       InFlag = EnvVal.getValue(1);
   2760 
   2761       // Load TOC of the callee into r2. We are using a target-specific load
   2762       // with r2 hard coded, because the result of a target-independent load
   2763       // would never go directly into r2, since r2 is a reserved register (which
   2764       // prevents the register allocator from allocating it), resulting in an
   2765       // additional register being allocated and an unnecessary move instruction
   2766       // being generated.
   2767       VTs = DAG.getVTList(MVT::Other, MVT::Glue);
   2768       SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain,
   2769                                        Callee, InFlag);
   2770       Chain = LoadTOCPtr.getValue(0);
   2771       InFlag = LoadTOCPtr.getValue(1);
   2772 
   2773       MTCTROps[0] = Chain;
   2774       MTCTROps[1] = LoadFuncPtr;
   2775       MTCTROps[2] = InFlag;
   2776     }
   2777 
   2778     Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps,
   2779                         2 + (InFlag.getNode() != 0));
   2780     InFlag = Chain.getValue(1);
   2781 
   2782     NodeTys.clear();
   2783     NodeTys.push_back(MVT::Other);
   2784     NodeTys.push_back(MVT::Glue);
   2785     Ops.push_back(Chain);
   2786     CallOpc = isSVR4ABI ? PPCISD::BCTRL_SVR4 : PPCISD::BCTRL_Darwin;
   2787     Callee.setNode(0);
   2788     // Add CTR register as callee so a bctr can be emitted later.
   2789     if (isTailCall)
   2790       Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT));
   2791   }
   2792 
   2793   // If this is a direct call, pass the chain and the callee.
   2794   if (Callee.getNode()) {
   2795     Ops.push_back(Chain);
   2796     Ops.push_back(Callee);
   2797   }
   2798   // If this is a tail call, add the stack pointer delta.
   2799   if (isTailCall)
   2800     Ops.push_back(DAG.getConstant(SPDiff, MVT::i32));
   2801 
   2802   // Add argument registers to the end of the list so that they are known live
   2803   // into the call.
   2804   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
   2805     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
   2806                                   RegsToPass[i].second.getValueType()));
   2807 
   2808   return CallOpc;
   2809 }
   2810 
   2811 SDValue
   2812 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
   2813                                    CallingConv::ID CallConv, bool isVarArg,
   2814                                    const SmallVectorImpl<ISD::InputArg> &Ins,
   2815                                    DebugLoc dl, SelectionDAG &DAG,
   2816                                    SmallVectorImpl<SDValue> &InVals) const {
   2817 
   2818   SmallVector<CCValAssign, 16> RVLocs;
   2819   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
   2820                     getTargetMachine(), RVLocs, *DAG.getContext());
   2821   CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);
   2822 
   2823   // Copy all of the result registers out of their specified physreg.
   2824   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
   2825     CCValAssign &VA = RVLocs[i];
   2826     EVT VT = VA.getValVT();
   2827     assert(VA.isRegLoc() && "Can only return in registers!");
   2828     Chain = DAG.getCopyFromReg(Chain, dl,
   2829                                VA.getLocReg(), VT, InFlag).getValue(1);
   2830     InVals.push_back(Chain.getValue(0));
   2831     InFlag = Chain.getValue(2);
   2832   }
   2833 
   2834   return Chain;
   2835 }
   2836 
   2837 SDValue
   2838 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl,
   2839                               bool isTailCall, bool isVarArg,
   2840                               SelectionDAG &DAG,
   2841                               SmallVector<std::pair<unsigned, SDValue>, 8>
   2842                                 &RegsToPass,
   2843                               SDValue InFlag, SDValue Chain,
   2844                               SDValue &Callee,
   2845                               int SPDiff, unsigned NumBytes,
   2846                               const SmallVectorImpl<ISD::InputArg> &Ins,
   2847                               SmallVectorImpl<SDValue> &InVals) const {
   2848   std::vector<EVT> NodeTys;
   2849   SmallVector<SDValue, 8> Ops;
   2850   unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff,
   2851                                  isTailCall, RegsToPass, Ops, NodeTys,
   2852                                  PPCSubTarget);
   2853 
   2854   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
   2855   if (isVarArg && PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64())
   2856     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
   2857 
   2858   // When performing tail call optimization the callee pops its arguments off
   2859   // the stack. Account for this here so these bytes can be pushed back on in
   2860   // PPCRegisterInfo::eliminateCallFramePseudoInstr.
   2861   int BytesCalleePops =
   2862     (CallConv == CallingConv::Fast &&
   2863      getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0;
   2864 
   2865   // Add a register mask operand representing the call-preserved registers.
   2866   const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
   2867   const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
   2868   assert(Mask && "Missing call preserved mask for calling convention");
   2869   Ops.push_back(DAG.getRegisterMask(Mask));
   2870 
   2871   if (InFlag.getNode())
   2872     Ops.push_back(InFlag);
   2873 
   2874   // Emit tail call.
   2875   if (isTailCall) {
   2876     // If this is the first return lowered for this function, add the regs
   2877     // to the liveout set for the function.
   2878     if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
   2879       SmallVector<CCValAssign, 16> RVLocs;
   2880       CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
   2881                      getTargetMachine(), RVLocs, *DAG.getContext());
   2882       CCInfo.AnalyzeCallResult(Ins, RetCC_PPC);
   2883       for (unsigned i = 0; i != RVLocs.size(); ++i)
   2884         DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
   2885     }
   2886 
   2887     assert(((Callee.getOpcode() == ISD::Register &&
   2888              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
   2889             Callee.getOpcode() == ISD::TargetExternalSymbol ||
   2890             Callee.getOpcode() == ISD::TargetGlobalAddress ||
   2891             isa<ConstantSDNode>(Callee)) &&
   2892     "Expecting an global address, external symbol, absolute value or register");
   2893 
   2894     return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, &Ops[0], Ops.size());
   2895   }
   2896 
   2897   // Add a NOP immediately after the branch instruction when using the 64-bit
   2898   // SVR4 ABI. At link time, if caller and callee are in a different module and
   2899   // thus have a different TOC, the call will be replaced with a call to a stub
   2900   // function which saves the current TOC, loads the TOC of the callee and
   2901   // branches to the callee. The NOP will be replaced with a load instruction
   2902   // which restores the TOC of the caller from the TOC save slot of the current
   2903   // stack frame. If caller and callee belong to the same module (and have the
   2904   // same TOC), the NOP will remain unchanged.
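          // Schematically (a sketch; the exact stub and the TOC save slot
          // offset are link-time / ABI details, not asserted here):
          //   bl callee    // may be redirected to a TOC-saving stub
          //   nop          // may be rewritten to: ld r2, <TOC save slot>(r1)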
   2905 
   2906   bool needsTOCRestore = false;
   2907   if (!isTailCall && PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) {
   2908     if (CallOpc == PPCISD::BCTRL_SVR4) {
   2909       // This is a call through a function pointer.
   2910       // Restore the caller TOC from the save area into R2.
   2911       // See PrepareCall() for more information about calls through function
   2912       // pointers in the 64-bit SVR4 ABI.
   2913       // We are using a target-specific load with r2 hard coded, because the
   2914       // result of a target-independent load would never go directly into r2,
   2915       // since r2 is a reserved register (which prevents the register allocator
   2916       // from allocating it), resulting in an additional register being
   2917       // allocated and an unnecessary move instruction being generated.
   2918       needsTOCRestore = true;
   2919     } else if (CallOpc == PPCISD::CALL_SVR4) {
   2920       // Otherwise insert NOP.
   2921       CallOpc = PPCISD::CALL_NOP_SVR4;
   2922     }
   2923   }
   2924 
   2925   Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
   2926   InFlag = Chain.getValue(1);
   2927 
   2928   if (needsTOCRestore) {
   2929     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
   2930     Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag);
   2931     InFlag = Chain.getValue(1);
   2932   }
   2933 
   2934   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
   2935                              DAG.getIntPtrConstant(BytesCalleePops, true),
   2936                              InFlag);
   2937   if (!Ins.empty())
   2938     InFlag = Chain.getValue(1);
   2939 
   2940   return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
   2941                          Ins, dl, DAG, InVals);
   2942 }
   2943 
   2944 SDValue
   2945 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   2946                              SmallVectorImpl<SDValue> &InVals) const {
   2947   SelectionDAG &DAG                     = CLI.DAG;
   2948   DebugLoc &dl                          = CLI.DL;
   2949   SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
   2950   SmallVector<SDValue, 32> &OutVals     = CLI.OutVals;
   2951   SmallVector<ISD::InputArg, 32> &Ins   = CLI.Ins;
   2952   SDValue Chain                         = CLI.Chain;
   2953   SDValue Callee                        = CLI.Callee;
   2954   bool &isTailCall                      = CLI.IsTailCall;
   2955   CallingConv::ID CallConv              = CLI.CallConv;
   2956   bool isVarArg                         = CLI.IsVarArg;
   2957 
   2958   if (isTailCall)
   2959     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
   2960                                                    Ins, DAG);
   2961 
   2962   if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64())
   2963     return LowerCall_SVR4(Chain, Callee, CallConv, isVarArg,
   2964                           isTailCall, Outs, OutVals, Ins,
   2965                           dl, DAG, InVals);
   2966 
   2967   return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
   2968                           isTailCall, Outs, OutVals, Ins,
   2969                           dl, DAG, InVals);
   2970 }
   2971 
   2972 SDValue
   2973 PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
   2974                                   CallingConv::ID CallConv, bool isVarArg,
   2975                                   bool isTailCall,
   2976                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
   2977                                   const SmallVectorImpl<SDValue> &OutVals,
   2978                                   const SmallVectorImpl<ISD::InputArg> &Ins,
   2979                                   DebugLoc dl, SelectionDAG &DAG,
   2980                                   SmallVectorImpl<SDValue> &InVals) const {
   2981   // See PPCTargetLowering::LowerFormalArguments_SVR4() for a description
   2982   // of the 32-bit SVR4 ABI stack frame layout.
   2983 
   2984   assert((CallConv == CallingConv::C ||
   2985           CallConv == CallingConv::Fast) && "Unknown calling convention!");
   2986 
   2987   unsigned PtrByteSize = 4;
   2988 
   2989   MachineFunction &MF = DAG.getMachineFunction();
   2990 
   2991   // Mark this function as potentially containing a tail call. As a
   2992   // consequence the frame pointer will be used for dynamic allocation and for
   2993   // restoring the caller's stack pointer in this function's epilogue, because
   2994   // a tail-called function might overwrite the value in this function's (MF)
   2995   // stack pointer save slot at 0(SP).
   2996   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
   2997       CallConv == CallingConv::Fast)
   2998     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
   2999 
   3000   // Count how many bytes are to be pushed on the stack, including the linkage
   3001   // area, parameter list area and the part of the local variable space which
   3002   // contains copies of aggregates which are passed by value.
   3003 
   3004   // Assign locations to all of the outgoing arguments.
   3005   SmallVector<CCValAssign, 16> ArgLocs;
   3006   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
   3007                  getTargetMachine(), ArgLocs, *DAG.getContext());
   3008 
   3009   // Reserve space for the linkage area on the stack.
   3010   CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false), PtrByteSize);
   3011 
   3012   if (isVarArg) {
   3013     // Handle fixed and variable vector arguments differently.
   3014     // Fixed vector arguments go into registers as long as registers are
   3015     // available. Variable vector arguments always go into memory.
   3016     unsigned NumArgs = Outs.size();
   3017 
   3018     for (unsigned i = 0; i != NumArgs; ++i) {
   3019       MVT ArgVT = Outs[i].VT;
   3020       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
   3021       bool Result;
   3022 
   3023       if (Outs[i].IsFixed) {
   3024         Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
   3025                              CCInfo);
   3026       } else {
   3027         Result = CC_PPC_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
   3028                                     ArgFlags, CCInfo);
   3029       }
   3030 
   3031       if (Result) {
   3032 #ifndef NDEBUG
   3033         errs() << "Call operand #" << i << " has unhandled type "
   3034                << EVT(ArgVT).getEVTString() << "\n";
   3035 #endif
   3036         llvm_unreachable(0);
   3037       }
   3038     }
   3039   } else {
   3040     // All arguments are treated the same.
   3041     CCInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4);
   3042   }
   3043 
   3044   // Assign locations to all of the outgoing aggregate by value arguments.
   3045   SmallVector<CCValAssign, 16> ByValArgLocs;
   3046   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
   3047                       getTargetMachine(), ByValArgLocs, *DAG.getContext());
   3048 
   3049   // Reserve stack space for the allocations in CCInfo.
   3050   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
   3051 
   3052   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4_ByVal);
   3053 
   3054   // Size of the linkage area, parameter list area and the part of the local
   3055   // variable space where copies of aggregates which are passed by value are
   3056   // stored.
   3057   unsigned NumBytes = CCByValInfo.getNextStackOffset();
   3058 
   3059   // Calculate by how many bytes the stack has to be adjusted in case of tail
   3060   // call optimization.
   3061   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
   3062 
   3063   // Adjust the stack pointer for the new arguments...
   3064   // These operations are automatically eliminated by the prolog/epilog pass
   3065   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
   3066   SDValue CallSeqStart = Chain;
   3067 
   3068   // Load the return address and frame pointer so they can be moved somewhere
   3069   // else later.
   3070   SDValue LROp, FPOp;
   3071   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false,
   3072                                        dl);
   3073 
   3074   // Set up a copy of the stack pointer for use loading and storing any
   3075   // arguments that may not fit in the registers available for argument
   3076   // passing.
   3077   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
   3078 
   3079   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
   3080   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
   3081   SmallVector<SDValue, 8> MemOpChains;
   3082 
   3083   bool seenFloatArg = false;
   3084   // Walk the register/memloc assignments, inserting copies/loads.
   3085   for (unsigned i = 0, j = 0, e = ArgLocs.size();
   3086        i != e;
   3087        ++i) {
   3088     CCValAssign &VA = ArgLocs[i];
   3089     SDValue Arg = OutVals[i];
   3090     ISD::ArgFlagsTy Flags = Outs[i].Flags;
   3091 
   3092     if (Flags.isByVal()) {
   3093       // Argument is an aggregate which is passed by value, thus we need to
   3094       // create a copy of it in the local variable space of the current stack
   3095       // frame (which is the stack frame of the caller) and pass the address of
   3096       // this copy to the callee.
   3097       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
   3098       CCValAssign &ByValVA = ByValArgLocs[j++];
   3099       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
   3100 
   3101       // Memory reserved in the local variable space of the caller's stack frame.
   3102       unsigned LocMemOffset = ByValVA.getLocMemOffset();
   3103 
   3104       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
   3105       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
   3106 
   3107       // Create a copy of the argument in the local area of the current
   3108       // stack frame.
   3109       SDValue MemcpyCall =
   3110         CreateCopyOfByValArgument(Arg, PtrOff,
   3111                                   CallSeqStart.getNode()->getOperand(0),
   3112                                   Flags, DAG, dl);
   3113 
   3114       // This must go outside the CALLSEQ_START..END.
   3115       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
   3116                            CallSeqStart.getNode()->getOperand(1));
   3117       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
   3118                              NewCallSeqStart.getNode());
   3119       Chain = CallSeqStart = NewCallSeqStart;
   3120 
   3121       // Pass the address of the aggregate copy on the stack either in a
   3122       // physical register or in the parameter list area of the current stack
   3123       // frame to the callee.
   3124       Arg = PtrOff;
   3125     }
   3126 
   3127     if (VA.isRegLoc()) {
   3128       seenFloatArg |= VA.getLocVT().isFloatingPoint();
   3129       // Put argument in a physical register.
   3130       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
   3131     } else {
   3132       // Put argument in the parameter list area of the current stack frame.
   3133       assert(VA.isMemLoc());
   3134       unsigned LocMemOffset = VA.getLocMemOffset();
   3135 
   3136       if (!isTailCall) {
   3137         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
   3138         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
   3139 
   3140         MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
   3141                                            MachinePointerInfo(),
   3142                                            false, false, 0));
   3143       } else {
   3144         // Calculate and remember argument location.
   3145         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
   3146                                  TailCallArguments);
   3147       }
   3148     }
   3149   }
   3150 
   3151   if (!MemOpChains.empty())
   3152     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
   3153                         &MemOpChains[0], MemOpChains.size());
   3154 
   3155   // Build a sequence of copy-to-reg nodes chained together with token chain
   3156   // and flag operands which copy the outgoing args into the appropriate regs.
   3157   SDValue InFlag;
   3158   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
   3159     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
   3160                              RegsToPass[i].second, InFlag);
   3161     InFlag = Chain.getValue(1);
   3162   }
   3163 
   3164   // Set CR bit 6 to true if this is a vararg call with floating args passed in
   3165   // registers.
   3166   if (isVarArg) {
   3167     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
   3168     SDValue Ops[] = { Chain, InFlag };
   3169 
   3170     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
   3171                         dl, VTs, Ops, InFlag.getNode() ? 2 : 1);
   3172 
   3173     InFlag = Chain.getValue(1);
   3174   }
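          // Illustrative (not from the original source): a vararg call that
          // passes a double in an FPR gets CR6SET, telling the callee's
          // prologue it must spill the FP argument registers; an all-integer
          // vararg call gets CR6UNSET so the callee can skip that work.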
   3175 
   3176   if (isTailCall)
   3177     PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
   3178                     false, TailCallArguments);
   3179 
   3180   return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
   3181                     RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
   3182                     Ins, InVals);
   3183 }
   3184 
   3185 SDValue
   3186 PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
   3187                                     CallingConv::ID CallConv, bool isVarArg,
   3188                                     bool isTailCall,
   3189                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
   3190                                     const SmallVectorImpl<SDValue> &OutVals,
   3191                                     const SmallVectorImpl<ISD::InputArg> &Ins,
   3192                                     DebugLoc dl, SelectionDAG &DAG,
   3193                                     SmallVectorImpl<SDValue> &InVals) const {
   3194 
   3195   unsigned NumOps  = Outs.size();
   3196 
   3197   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   3198   bool isPPC64 = PtrVT == MVT::i64;
   3199   unsigned PtrByteSize = isPPC64 ? 8 : 4;
   3200 
   3201   MachineFunction &MF = DAG.getMachineFunction();
   3202 
   3203   // Mark this function as potentially containing a tail call. As a
   3204   // consequence the frame pointer will be used for dynamic allocation and for
   3205   // restoring the caller's stack pointer in this function's epilogue, because
   3206   // a tail-called function might overwrite the value in this function's (MF)
   3207   // stack pointer save slot at 0(SP).
   3208   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
   3209       CallConv == CallingConv::Fast)
   3210     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
   3211 
   3212   unsigned nAltivecParamsAtEnd = 0;
   3213 
   3214   // Count how many bytes are to be pushed on the stack, including the linkage
   3215   // area, and parameter passing area.  We start with 24/48 bytes, which is
   3216   // prereserved space for [SP][CR][LR][3 x unused].
   3217   unsigned NumBytes =
   3218     CalculateParameterAndLinkageAreaSize(DAG, isPPC64, isVarArg, CallConv,
   3219                                          Outs, OutVals,
   3220                                          nAltivecParamsAtEnd);
   3221 
   3222   // Calculate by how many bytes the stack has to be adjusted in case of tail
   3223   // call optimization.
   3224   int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
   3225 
   3226   // To protect arguments on the stack from being clobbered in a tail call,
   3227   // force all the loads to happen before doing any other lowering.
   3228   if (isTailCall)
   3229     Chain = DAG.getStackArgumentTokenFactor(Chain);
   3230 
   3231   // Adjust the stack pointer for the new arguments...
   3232   // These operations are automatically eliminated by the prolog/epilog pass
   3233   Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
   3234   SDValue CallSeqStart = Chain;
   3235 
   3236   // Load the return address and frame pointer so they can be moved somewhere
   3237   // else later.
   3238   SDValue LROp, FPOp;
   3239   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true,
   3240                                        dl);
   3241 
   3242   // Set up a copy of the stack pointer for use loading and storing any
   3243   // arguments that may not fit in the registers available for argument
   3244   // passing.
   3245   SDValue StackPtr;
   3246   if (isPPC64)
   3247     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
   3248   else
   3249     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
   3250 
   3251   // Figure out which arguments are going to go in registers, and which in
   3252   // memory.  Also, if this is a vararg function, floating point arguments
   3253   // must be stored to our stack, and loaded into integer regs as well, if
   3254   // any integer regs are available for argument passing.
   3255   unsigned ArgOffset = PPCFrameLowering::getLinkageSize(isPPC64, true);
   3256   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
   3257 
   3258   static const uint16_t GPR_32[] = {           // 32-bit registers.
   3259     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
   3260     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
   3261   };
   3262   static const uint16_t GPR_64[] = {           // 64-bit registers.
   3263     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
   3264     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
   3265   };
   3266   static const uint16_t *FPR = GetFPR();
   3267 
   3268   static const uint16_t VR[] = {
   3269     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
   3270     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
   3271   };
   3272   const unsigned NumGPRs = array_lengthof(GPR_32);
   3273   const unsigned NumFPRs = 13;
   3274   const unsigned NumVRs  = array_lengthof(VR);
   3275 
   3276   const uint16_t *GPR = isPPC64 ? GPR_64 : GPR_32;
   3277 
   3278   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
   3279   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
   3280 
   3281   SmallVector<SDValue, 8> MemOpChains;
   3282   for (unsigned i = 0; i != NumOps; ++i) {
   3283     SDValue Arg = OutVals[i];
   3284     ISD::ArgFlagsTy Flags = Outs[i].Flags;
   3285 
   3286     // PtrOff will be used to store the current argument to the stack if a
   3287     // register cannot be found for it.
   3288     SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
   3289     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
   3293 
   3294     // On PPC64, promote integers to 64-bit values.
   3295     if (isPPC64 && Arg.getValueType() == MVT::i32) {
   3296       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
   3297       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
   3298       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
   3299     }
   3300 
   3301     // FIXME memcpy is used way more than necessary.  Correctness first.
   3302     if (Flags.isByVal()) {
   3303       unsigned Size = Flags.getByValSize();
   3304       if (Size==1 || Size==2) {
   3305         // Very small objects are passed right-justified.
   3306         // Everything else is passed left-justified.
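                // Illustrative: when such a tiny byval must go to memory, the
                // "4 - Size" adjustment below copies it to PtrOff+3 (Size==1)
                // or PtrOff+2 (Size==2), the right-justified end of its
                // big-endian word; in a register it just gets an EXTLOAD.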
   3307         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
   3308         if (GPR_idx != NumGPRs) {
   3309           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
   3310                                         MachinePointerInfo(), VT,
   3311                                         false, false, 0);
   3312           MemOpChains.push_back(Load.getValue(1));
   3313           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
   3314 
   3315           ArgOffset += PtrByteSize;
   3316         } else {
   3317           SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType());
   3318           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
   3319           SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr,
   3320                                 CallSeqStart.getNode()->getOperand(0),
   3321                                 Flags, DAG, dl);
   3322           // This must go outside the CALLSEQ_START..END.
   3323           SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
   3324                                CallSeqStart.getNode()->getOperand(1));
   3325           DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
   3326                                  NewCallSeqStart.getNode());
   3327           Chain = CallSeqStart = NewCallSeqStart;
   3328           ArgOffset += PtrByteSize;
   3329         }
   3330         continue;
   3331       }
   3332       // Copy entire object into memory.  There are cases where gcc-generated
   3333       // code assumes it is there, even if it could be put entirely into
   3334       // registers.  (This is not what the doc says.)
   3335       SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
   3336                             CallSeqStart.getNode()->getOperand(0),
   3337                             Flags, DAG, dl);
   3338       // This must go outside the CALLSEQ_START..END.
   3339       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
   3340                            CallSeqStart.getNode()->getOperand(1));
   3341       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), NewCallSeqStart.getNode());
   3342       Chain = CallSeqStart = NewCallSeqStart;
   3343       // And copy the pieces of it that fit into registers.
   3344       for (unsigned j=0; j<Size; j+=PtrByteSize) {
   3345         SDValue Const = DAG.getConstant(j, PtrOff.getValueType());
   3346         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
   3347         if (GPR_idx != NumGPRs) {
   3348           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
   3349                                      MachinePointerInfo(),
   3350                                      false, false, false, 0);
   3351           MemOpChains.push_back(Load.getValue(1));
   3352           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
   3353           ArgOffset += PtrByteSize;
   3354         } else {
   3355           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
   3356           break;
   3357         }
   3358       }
   3359       continue;
   3360     }
   3361 
   3362     switch (Arg.getValueType().getSimpleVT().SimpleTy) {
   3363     default: llvm_unreachable("Unexpected ValueType for argument!");
   3364     case MVT::i32:
   3365     case MVT::i64:
   3366       if (GPR_idx != NumGPRs) {
   3367         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
   3368       } else {
   3369         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
   3370                          isPPC64, isTailCall, false, MemOpChains,
   3371                          TailCallArguments, dl);
   3372       }
   3373       ArgOffset += PtrByteSize;
   3374       break;
   3375     case MVT::f32:
   3376     case MVT::f64:
   3377       if (FPR_idx != NumFPRs) {
   3378         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
   3379 
   3380         if (isVarArg) {
   3381           SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
   3382                                        MachinePointerInfo(), false, false, 0);
   3383           MemOpChains.push_back(Store);
   3384 
   3385           // Float varargs are always shadowed in available integer registers
   3386           if (GPR_idx != NumGPRs) {
   3387             SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
   3388                                        MachinePointerInfo(), false, false,
   3389                                        false, 0);
   3390             MemOpChains.push_back(Load.getValue(1));
   3391             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
   3392           }
   3393           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
   3394             SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType());
   3395             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
   3396             SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff,
   3397                                        MachinePointerInfo(),
   3398                                        false, false, false, 0);
   3399             MemOpChains.push_back(Load.getValue(1));
   3400             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
   3401           }
   3402         } else {
   3403           // If we have any FPRs remaining, we may also have GPRs remaining.
   3404           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
   3405           // GPRs.
   3406           if (GPR_idx != NumGPRs)
   3407             ++GPR_idx;
   3408           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
    3409               !isPPC64)  // PPC64 has 64-bit GPRs, obviously :)
   3410             ++GPR_idx;
   3411         }
   3412       } else {
   3413         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
   3414                          isPPC64, isTailCall, false, MemOpChains,
   3415                          TailCallArguments, dl);
   3416       }
   3417       if (isPPC64)
   3418         ArgOffset += 8;
   3419       else
   3420         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
   3421       break;
   3422     case MVT::v4f32:
   3423     case MVT::v4i32:
   3424     case MVT::v8i16:
   3425     case MVT::v16i8:
   3426       if (isVarArg) {
   3427         // These go aligned on the stack, or in the corresponding R registers
   3428         // when within range.  The Darwin PPC ABI doc claims they also go in
    3429         // V registers; in fact gcc does this only for arguments that are
    3430         // prototyped, not for those that match the ellipsis ("...").  We do
    3431         // it for all arguments, which seems to work.
    3432         while (ArgOffset % 16 != 0) {
   3433           ArgOffset += PtrByteSize;
   3434           if (GPR_idx != NumGPRs)
   3435             GPR_idx++;
   3436         }
   3437         // We could elide this store in the case where the object fits
   3438         // entirely in R registers.  Maybe later.
   3439         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
   3440                             DAG.getConstant(ArgOffset, PtrVT));
   3441         SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff,
   3442                                      MachinePointerInfo(), false, false, 0);
   3443         MemOpChains.push_back(Store);
   3444         if (VR_idx != NumVRs) {
   3445           SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
   3446                                      MachinePointerInfo(),
   3447                                      false, false, false, 0);
   3448           MemOpChains.push_back(Load.getValue(1));
   3449           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
   3450         }
   3451         ArgOffset += 16;
   3452         for (unsigned i=0; i<16; i+=PtrByteSize) {
   3453           if (GPR_idx == NumGPRs)
   3454             break;
   3455           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
   3456                                   DAG.getConstant(i, PtrVT));
   3457           SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(),
   3458                                      false, false, false, 0);
   3459           MemOpChains.push_back(Load.getValue(1));
   3460           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
   3461         }
   3462         break;
   3463       }
   3464 
   3465       // Non-varargs Altivec params generally go in registers, but have
   3466       // stack space allocated at the end.
   3467       if (VR_idx != NumVRs) {
   3468         // Doesn't have GPR space allocated.
   3469         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
   3470       } else if (nAltivecParamsAtEnd==0) {
   3471         // We are emitting Altivec params in order.
   3472         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
   3473                          isPPC64, isTailCall, true, MemOpChains,
   3474                          TailCallArguments, dl);
   3475         ArgOffset += 16;
   3476       }
   3477       break;
   3478     }
   3479   }
   3480   // If all Altivec parameters fit in registers, as they usually do,
   3481   // they get stack space following the non-Altivec parameters.  We
   3482   // don't track this here because nobody below needs it.
    3483   // If there are more Altivec parameters than fit in registers, emit
    3484   // the stores here.
   3485   if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
   3486     unsigned j = 0;
    3487     // Offset is aligned; skip the first 12 params, which go in V registers.
   3488     ArgOffset = ((ArgOffset+15)/16)*16;
   3489     ArgOffset += 12*16;
   3490     for (unsigned i = 0; i != NumOps; ++i) {
   3491       SDValue Arg = OutVals[i];
   3492       EVT ArgType = Outs[i].VT;
   3493       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
   3494           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
   3495         if (++j > NumVRs) {
   3496           SDValue PtrOff;
   3497           // We are emitting Altivec params in order.
   3498           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
   3499                            isPPC64, isTailCall, true, MemOpChains,
   3500                            TailCallArguments, dl);
   3501           ArgOffset += 16;
   3502         }
   3503       }
   3504     }
   3505   }
   3506 
   3507   if (!MemOpChains.empty())
   3508     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
   3509                         &MemOpChains[0], MemOpChains.size());
   3510 
   3511   // Check if this is an indirect call (MTCTR/BCTRL).
   3512   // See PrepareCall() for more information about calls through function
   3513   // pointers in the 64-bit SVR4 ABI.
   3514   if (!isTailCall && isPPC64 && PPCSubTarget.isSVR4ABI() &&
    3515       !isa<GlobalAddressSDNode>(Callee) &&
    3516       !isa<ExternalSymbolSDNode>(Callee) &&
   3517       !isBLACompatibleAddress(Callee, DAG)) {
   3518     // Load r2 into a virtual register and store it to the TOC save area.
   3519     SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
   3520     // TOC save area offset.
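             // In the 64-bit SVR4 ABI the TOC save slot sits at SP+40, after
             // the back chain, CR save, LR save, and two reserved doublewords.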
   3521     SDValue PtrOff = DAG.getIntPtrConstant(40);
   3522     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
   3523     Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(),
   3524                          false, false, 0);
   3525   }
   3526 
   3527   // On Darwin, R12 must contain the address of an indirect callee.  This does
   3528   // not mean the MTCTR instruction must use R12; it's easier to model this as
   3529   // an extra parameter, so do that.
   3530   if (!isTailCall &&
    3531       !isa<GlobalAddressSDNode>(Callee) &&
    3532       !isa<ExternalSymbolSDNode>(Callee) &&
   3533       !isBLACompatibleAddress(Callee, DAG))
   3534     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
   3535                                                    PPC::R12), Callee));
   3536 
   3537   // Build a sequence of copy-to-reg nodes chained together with token chain
   3538   // and flag operands which copy the outgoing args into the appropriate regs.
   3539   SDValue InFlag;
   3540   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
   3541     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
   3542                              RegsToPass[i].second, InFlag);
   3543     InFlag = Chain.getValue(1);
   3544   }
   3545 
   3546   if (isTailCall)
   3547     PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
   3548                     FPOp, true, TailCallArguments);
   3549 
   3550   return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
   3551                     RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
   3552                     Ins, InVals);
   3553 }
   3554 
   3555 bool
   3556 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
   3557                                   MachineFunction &MF, bool isVarArg,
   3558                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
   3559                                   LLVMContext &Context) const {
   3560   SmallVector<CCValAssign, 16> RVLocs;
   3561   CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
   3562                  RVLocs, Context);
   3563   return CCInfo.CheckReturn(Outs, RetCC_PPC);
   3564 }
   3565 
   3566 SDValue
   3567 PPCTargetLowering::LowerReturn(SDValue Chain,
   3568                                CallingConv::ID CallConv, bool isVarArg,
   3569                                const SmallVectorImpl<ISD::OutputArg> &Outs,
   3570                                const SmallVectorImpl<SDValue> &OutVals,
   3571                                DebugLoc dl, SelectionDAG &DAG) const {
   3572 
   3573   SmallVector<CCValAssign, 16> RVLocs;
   3574   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
   3575                  getTargetMachine(), RVLocs, *DAG.getContext());
   3576   CCInfo.AnalyzeReturn(Outs, RetCC_PPC);
   3577 
   3578   // If this is the first return lowered for this function, add the regs to the
   3579   // liveout set for the function.
   3580   if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
   3581     for (unsigned i = 0; i != RVLocs.size(); ++i)
   3582       DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
   3583   }
   3584 
   3585   SDValue Flag;
   3586 
   3587   // Copy the result values into the output registers.
   3588   for (unsigned i = 0; i != RVLocs.size(); ++i) {
   3589     CCValAssign &VA = RVLocs[i];
   3590     assert(VA.isRegLoc() && "Can only return in registers!");
   3591     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
   3592                              OutVals[i], Flag);
   3593     Flag = Chain.getValue(1);
   3594   }
   3595 
   3596   if (Flag.getNode())
   3597     return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
   3598   else
   3599     return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, Chain);
   3600 }
   3601 
   3602 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
   3603                                    const PPCSubtarget &Subtarget) const {
   3604   // When we pop the dynamic allocation we need to restore the SP link.
   3605   DebugLoc dl = Op.getDebugLoc();
   3606 
    3607   // Get the correct type for pointers.
   3608   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   3609 
   3610   // Construct the stack pointer operand.
   3611   bool isPPC64 = Subtarget.isPPC64();
   3612   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
   3613   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
   3614 
   3615   // Get the operands for the STACKRESTORE.
   3616   SDValue Chain = Op.getOperand(0);
   3617   SDValue SaveSP = Op.getOperand(1);
   3618 
   3619   // Load the old link SP.
   3620   SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr,
   3621                                    MachinePointerInfo(),
   3622                                    false, false, false, 0);
   3623 
   3624   // Restore the stack pointer.
   3625   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
   3626 
   3627   // Store the old link SP.
   3628   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(),
   3629                       false, false, 0);
   3630 }
   3631 
   3632 
   3633 
   3634 SDValue
   3635 PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const {
   3636   MachineFunction &MF = DAG.getMachineFunction();
   3637   bool isPPC64 = PPCSubTarget.isPPC64();
   3638   bool isDarwinABI = PPCSubTarget.isDarwinABI();
   3639   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   3640 
    3641   // Get the current return address save index, creating it first if it
    3642   // does not yet exist.
   3643   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
   3644   int RASI = FI->getReturnAddrSaveIndex();
   3645 
    3646   // If the return address save index hasn't been defined yet.
    3647   if (!RASI) {
    3648     // Find out the fixed offset of the return address save area.
    3649     int LROffset = PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI);
    3650     // Allocate the frame index for the return address save area.
   3651     RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, true);
   3652     // Save the result.
   3653     FI->setReturnAddrSaveIndex(RASI);
   3654   }
   3655   return DAG.getFrameIndex(RASI, PtrVT);
   3656 }
   3657 
   3658 SDValue
   3659 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
   3660   MachineFunction &MF = DAG.getMachineFunction();
   3661   bool isPPC64 = PPCSubTarget.isPPC64();
   3662   bool isDarwinABI = PPCSubTarget.isDarwinABI();
   3663   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   3664 
   3665   // Get current frame pointer save index.  The users of this index will be
   3666   // primarily DYNALLOC instructions.
   3667   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
   3668   int FPSI = FI->getFramePointerSaveIndex();
   3669 
   3670   // If the frame pointer save index hasn't been defined yet.
   3671   if (!FPSI) {
    3672     // Find out the fixed offset of the frame pointer save area.
   3673     int FPOffset = PPCFrameLowering::getFramePointerSaveOffset(isPPC64,
   3674                                                            isDarwinABI);
   3675 
   3676     // Allocate the frame index for frame pointer save area.
   3677     FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
   3678     // Save the result.
   3679     FI->setFramePointerSaveIndex(FPSI);
   3680   }
   3681   return DAG.getFrameIndex(FPSI, PtrVT);
   3682 }
   3683 
   3684 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
   3685                                          SelectionDAG &DAG,
   3686                                          const PPCSubtarget &Subtarget) const {
   3687   // Get the inputs.
   3688   SDValue Chain = Op.getOperand(0);
   3689   SDValue Size  = Op.getOperand(1);
   3690   DebugLoc dl = Op.getDebugLoc();
   3691 
    3692   // Get the correct type for pointers.
   3693   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   3694   // Negate the size.
   3695   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
   3696                                   DAG.getConstant(0, PtrVT), Size);
   3697   // Construct a node for the frame pointer save index.
   3698   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
   3699   // Build a DYNALLOC node.
   3700   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
   3701   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
   3702   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops, 3);
   3703 }
   3704 
    3705 /// LowerSELECT_CC - Lower floating-point select_cc's into the fsel
    3706 /// instruction when possible.
   3707 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
   3708   // Not FP? Not a fsel.
   3709   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
   3710       !Op.getOperand(2).getValueType().isFloatingPoint())
   3711     return Op;
   3712 
   3713   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
   3714 
   3715   // Cannot handle SETEQ/SETNE.
   3716   if (CC == ISD::SETEQ || CC == ISD::SETNE) return Op;
   3717 
   3718   EVT ResVT = Op.getValueType();
   3719   EVT CmpVT = Op.getOperand(0).getValueType();
   3720   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
   3721   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
   3722   DebugLoc dl = Op.getDebugLoc();
   3723 
   3724   // If the RHS of the comparison is a 0.0, we don't need to do the
   3725   // subtraction at all.
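           // The fsel node implements FSEL(X, TV, FV) == (X >= 0.0 ? TV : FV),
           // with -0.0 treated as zero and NaN taking the FV arm, so each
           // predicate below is recast as a sign test on LHS, -LHS, or an
           // operand difference.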
   3726   if (isFloatingPointZero(RHS))
   3727     switch (CC) {
   3728     default: break;       // SETUO etc aren't handled by fsel.
   3729     case ISD::SETULT:
   3730     case ISD::SETLT:
   3731       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
   3732     case ISD::SETOGE:
   3733     case ISD::SETGE:
   3734       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
   3735         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
   3736       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
   3737     case ISD::SETUGT:
   3738     case ISD::SETGT:
    3739       std::swap(TV, FV);  // fsel is natively setge, swap operands for setgt
   3740     case ISD::SETOLE:
   3741     case ISD::SETLE:
   3742       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
   3743         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
   3744       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
   3745                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
   3746     }
   3747 
   3748   SDValue Cmp;
   3749   switch (CC) {
   3750   default: break;       // SETUO etc aren't handled by fsel.
   3751   case ISD::SETULT:
   3752   case ISD::SETLT:
   3753     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
   3754     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
   3755       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    3756     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
   3757   case ISD::SETOGE:
   3758   case ISD::SETGE:
   3759     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS);
   3760     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
   3761       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    3762     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
   3763   case ISD::SETUGT:
   3764   case ISD::SETGT:
   3765     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
   3766     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
   3767       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    3768     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
   3769   case ISD::SETOLE:
   3770   case ISD::SETLE:
   3771     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS);
   3772     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
   3773       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    3774     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
   3775   }
   3776   return Op;
   3777 }
   3778 
   3779 // FIXME: Split this code up when LegalizeDAGTypes lands.
   3780 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
   3781                                            DebugLoc dl) const {
   3782   assert(Op.getOperand(0).getValueType().isFloatingPoint());
   3783   SDValue Src = Op.getOperand(0);
   3784   if (Src.getValueType() == MVT::f32)
   3785     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
   3786 
   3787   SDValue Tmp;
   3788   switch (Op.getValueType().getSimpleVT().SimpleTy) {
   3789   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
   3790   case MVT::i32:
   3791     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ :
   3792                                                          PPCISD::FCTIDZ,
   3793                       dl, MVT::f64, Src);
   3794     break;
   3795   case MVT::i64:
   3796     Tmp = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Src);
   3797     break;
   3798   }
   3799 
   3800   // Convert the FP value to an int value through memory.
   3801   SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64);
   3802 
   3803   // Emit a store to the stack slot.
   3804   SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr,
   3805                                MachinePointerInfo(), false, false, 0);
   3806 
   3807   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
   3808   // add in a bias.
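           // (the 64-bit store image is big-endian, so the low 32-bit word of
           // the fctiwz result sits at offset 4)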
   3809   if (Op.getValueType() == MVT::i32)
   3810     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
   3811                         DAG.getConstant(4, FIPtr.getValueType()));
   3812   return DAG.getLoad(Op.getValueType(), dl, Chain, FIPtr, MachinePointerInfo(),
   3813                      false, false, false, 0);
   3814 }
   3815 
   3816 SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op,
   3817                                            SelectionDAG &DAG) const {
   3818   DebugLoc dl = Op.getDebugLoc();
   3819   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
   3820   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
   3821     return SDValue();
   3822 
   3823   if (Op.getOperand(0).getValueType() == MVT::i64) {
   3824     SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0));
   3825     SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits);
   3826     if (Op.getValueType() == MVT::f32)
   3827       FP = DAG.getNode(ISD::FP_ROUND, dl,
   3828                        MVT::f32, FP, DAG.getIntPtrConstant(0));
   3829     return FP;
   3830   }
   3831 
   3832   assert(Op.getOperand(0).getValueType() == MVT::i32 &&
   3833          "Unhandled SINT_TO_FP type in custom expander!");
   3834   // Since we only generate this in 64-bit mode, we can take advantage of
   3835   // 64-bit registers.  In particular, sign extend the input value into the
    3836   // 64-bit register with extsw, store the WHOLE 64-bit value into the
    3837   // stack, and then lfd and fcfid it.
   3838   MachineFunction &MF = DAG.getMachineFunction();
   3839   MachineFrameInfo *FrameInfo = MF.getFrameInfo();
   3840   int FrameIdx = FrameInfo->CreateStackObject(8, 8, false);
   3841   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   3842   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
   3843 
   3844   SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, dl, MVT::i32,
   3845                                 Op.getOperand(0));
   3846 
   3847   // STD the extended value into the stack slot.
   3848   MachineMemOperand *MMO =
   3849     MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
   3850                             MachineMemOperand::MOStore, 8, 8);
   3851   SDValue Ops[] = { DAG.getEntryNode(), Ext64, FIdx };
   3852   SDValue Store =
   3853     DAG.getMemIntrinsicNode(PPCISD::STD_32, dl, DAG.getVTList(MVT::Other),
    3854                             Ops, 3, MVT::i64, MMO);
   3855   // Load the value as a double.
   3856   SDValue Ld = DAG.getLoad(MVT::f64, dl, Store, FIdx, MachinePointerInfo(),
   3857                            false, false, false, 0);
   3858 
   3859   // FCFID it and return it.
   3860   SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Ld);
   3861   if (Op.getValueType() == MVT::f32)
   3862     FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP, DAG.getIntPtrConstant(0));
   3863   return FP;
   3864 }
   3865 
   3866 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
   3867                                             SelectionDAG &DAG) const {
   3868   DebugLoc dl = Op.getDebugLoc();
   3869   /*
    3870    The rounding mode is in bits 30:31 of FPSCR, and has the following
   3871    settings:
   3872      00 Round to nearest
   3873      01 Round to 0
   3874      10 Round to +inf
   3875      11 Round to -inf
   3876 
   3877   FLT_ROUNDS, on the other hand, expects the following:
   3878     -1 Undefined
   3879      0 Round to 0
   3880      1 Round to nearest
   3881      2 Round to +inf
   3882      3 Round to -inf
   3883 
   3884   To perform the conversion, we do:
   3885     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
   3886   */
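           // Checking the formula against each RN value (RN = FPSCR & 3):
           //   RN=00: 0 ^ (3 >> 1) = 1  (nearest)
           //   RN=01: 1 ^ (2 >> 1) = 0  (toward 0)
           //   RN=10: 2 ^ (1 >> 1) = 2  (+inf)
           //   RN=11: 3 ^ (0 >> 1) = 3  (-inf)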
   3887 
   3888   MachineFunction &MF = DAG.getMachineFunction();
   3889   EVT VT = Op.getValueType();
   3890   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   3891   std::vector<EVT> NodeTys;
   3892   SDValue MFFSreg, InFlag;
   3893 
   3894   // Save FP Control Word to register
   3895   NodeTys.push_back(MVT::f64);    // return register
   3896   NodeTys.push_back(MVT::Glue);   // unused in this context
   3897   SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
   3898 
   3899   // Save FP register to stack slot
   3900   int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false);
   3901   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
   3902   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain,
   3903                                StackSlot, MachinePointerInfo(), false, false,0);
   3904 
   3905   // Load FP Control Word from low 32 bits of stack slot.
   3906   SDValue Four = DAG.getConstant(4, PtrVT);
   3907   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
   3908   SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo(),
   3909                             false, false, false, 0);
   3910 
   3911   // Transform as necessary
   3912   SDValue CWD1 =
   3913     DAG.getNode(ISD::AND, dl, MVT::i32,
   3914                 CWD, DAG.getConstant(3, MVT::i32));
   3915   SDValue CWD2 =
   3916     DAG.getNode(ISD::SRL, dl, MVT::i32,
   3917                 DAG.getNode(ISD::AND, dl, MVT::i32,
   3918                             DAG.getNode(ISD::XOR, dl, MVT::i32,
   3919                                         CWD, DAG.getConstant(3, MVT::i32)),
   3920                             DAG.getConstant(3, MVT::i32)),
   3921                 DAG.getConstant(1, MVT::i32));
   3922 
   3923   SDValue RetVal =
   3924     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
   3925 
   3926   return DAG.getNode((VT.getSizeInBits() < 16 ?
   3927                       ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal);
   3928 }
   3929 
   3930 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
   3931   EVT VT = Op.getValueType();
   3932   unsigned BitWidth = VT.getSizeInBits();
   3933   DebugLoc dl = Op.getDebugLoc();
   3934   assert(Op.getNumOperands() == 3 &&
   3935          VT == Op.getOperand(1).getValueType() &&
   3936          "Unexpected SHL!");
   3937 
   3938   // Expand into a bunch of logical ops.  Note that these ops
   3939   // depend on the PPC behavior for oversized shift amounts.
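           // The PPCISD shift nodes are assumed to yield 0 (all sign bits for
           // SRA) for amounts in [BitWidth, 2*BitWidth), so exactly one of
           // Tmp3 (Amt < BitWidth) and Tmp6 (Amt >= BitWidth) contributes to
           // OutHi.  E.g. BitWidth=32, Amt=40: OutHi = 0 | 0 | (Lo << 8),
           // OutLo = 0.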
   3940   SDValue Lo = Op.getOperand(0);
   3941   SDValue Hi = Op.getOperand(1);
   3942   SDValue Amt = Op.getOperand(2);
   3943   EVT AmtVT = Amt.getValueType();
   3944 
   3945   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
   3946                              DAG.getConstant(BitWidth, AmtVT), Amt);
   3947   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
   3948   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
   3949   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
   3950   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
   3951                              DAG.getConstant(-BitWidth, AmtVT));
   3952   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
   3953   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
   3954   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
   3955   SDValue OutOps[] = { OutLo, OutHi };
   3956   return DAG.getMergeValues(OutOps, 2, dl);
   3957 }
   3958 
   3959 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
   3960   EVT VT = Op.getValueType();
   3961   DebugLoc dl = Op.getDebugLoc();
   3962   unsigned BitWidth = VT.getSizeInBits();
   3963   assert(Op.getNumOperands() == 3 &&
   3964          VT == Op.getOperand(1).getValueType() &&
   3965          "Unexpected SRL!");
   3966 
   3967   // Expand into a bunch of logical ops.  Note that these ops
   3968   // depend on the PPC behavior for oversized shift amounts.
   3969   SDValue Lo = Op.getOperand(0);
   3970   SDValue Hi = Op.getOperand(1);
   3971   SDValue Amt = Op.getOperand(2);
   3972   EVT AmtVT = Amt.getValueType();
   3973 
   3974   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
   3975                              DAG.getConstant(BitWidth, AmtVT), Amt);
   3976   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
   3977   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
   3978   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
   3979   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
   3980                              DAG.getConstant(-BitWidth, AmtVT));
   3981   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
   3982   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
   3983   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
   3984   SDValue OutOps[] = { OutLo, OutHi };
   3985   return DAG.getMergeValues(OutOps, 2, dl);
   3986 }
   3987 
   3988 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
   3989   DebugLoc dl = Op.getDebugLoc();
   3990   EVT VT = Op.getValueType();
   3991   unsigned BitWidth = VT.getSizeInBits();
   3992   assert(Op.getNumOperands() == 3 &&
   3993          VT == Op.getOperand(1).getValueType() &&
   3994          "Unexpected SRA!");
   3995 
   3996   // Expand into a bunch of logical ops, followed by a select_cc.
   3997   SDValue Lo = Op.getOperand(0);
   3998   SDValue Hi = Op.getOperand(1);
   3999   SDValue Amt = Op.getOperand(2);
   4000   EVT AmtVT = Amt.getValueType();
   4001 
   4002   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
   4003                              DAG.getConstant(BitWidth, AmtVT), Amt);
   4004   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
   4005   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
   4006   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
   4007   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
   4008                              DAG.getConstant(-BitWidth, AmtVT));
   4009   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
   4010   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
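           // Unlike SRL_PARTS, an OR cannot combine the two OutLo cases: for
           // Amt < BitWidth, Tmp6 = SRA(Hi, Amt - BitWidth) is all sign bits
           // rather than zero, so select on Tmp5 = Amt - BitWidth instead
           // (Tmp5 <= 0 picks the normal path, Tmp4).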
   4011   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, AmtVT),
   4012                                   Tmp4, Tmp6, ISD::SETLE);
   4013   SDValue OutOps[] = { OutLo, OutHi };
   4014   return DAG.getMergeValues(OutOps, 2, dl);
   4015 }
   4016 
   4017 //===----------------------------------------------------------------------===//
   4018 // Vector related lowering.
   4019 //
   4020 
   4021 /// BuildSplatI - Build a canonical splati of Val with an element size of
   4022 /// SplatSize.  Cast the result to VT.
   4023 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
   4024                              SelectionDAG &DAG, DebugLoc dl) {
   4025   assert(Val >= -16 && Val <= 15 && "vsplti is out of range!");
   4026 
   4027   static const EVT VTys[] = { // canonical VT to use for each size.
   4028     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
   4029   };
   4030 
   4031   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
   4032 
   4033   // Force vspltis[hw] -1 to vspltisb -1 to canonicalize.
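           // (a splat of -1 is all ones at any element size, so the byte form
           // subsumes the halfword and word forms)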
   4034   if (Val == -1)
   4035     SplatSize = 1;
   4036 
   4037   EVT CanonicalVT = VTys[SplatSize-1];
   4038 
   4039   // Build a canonical splat for this value.
   4040   SDValue Elt = DAG.getConstant(Val, MVT::i32);
   4041   SmallVector<SDValue, 8> Ops;
   4042   Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
   4043   SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT,
   4044                               &Ops[0], Ops.size());
   4045   return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res);
   4046 }
   4047 
   4048 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
   4049 /// specified intrinsic ID.
   4050 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
   4051                                 SelectionDAG &DAG, DebugLoc dl,
   4052                                 EVT DestVT = MVT::Other) {
   4053   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
   4054   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
   4055                      DAG.getConstant(IID, MVT::i32), LHS, RHS);
   4056 }
   4057 
   4058 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
   4059 /// specified intrinsic ID.
   4060 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
   4061                                 SDValue Op2, SelectionDAG &DAG,
   4062                                 DebugLoc dl, EVT DestVT = MVT::Other) {
   4063   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
   4064   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
   4065                      DAG.getConstant(IID, MVT::i32), Op0, Op1, Op2);
   4066 }
   4067 
   4068 
   4069 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
   4070 /// amount.  The result has the specified value type.
   4071 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
   4072                              EVT VT, SelectionDAG &DAG, DebugLoc dl) {
   4073   // Force LHS/RHS to be the right type.
   4074   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
   4075   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
   4076 
   4077   int Ops[16];
   4078   for (unsigned i = 0; i != 16; ++i)
   4079     Ops[i] = i + Amt;
   4080   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
   4081   return DAG.getNode(ISD::BITCAST, dl, VT, T);
   4082 }
   4083 
   4084 // If this is a case we can't handle, return null and let the default
   4085 // expansion code take care of it.  If we CAN select this case, and if it
   4086 // selects to a single instruction, return Op.  Otherwise, if we can codegen
   4087 // this case more efficiently than a constant pool load, lower it to the
   4088 // sequence of ops that should be used.
   4089 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
   4090                                              SelectionDAG &DAG) const {
   4091   DebugLoc dl = Op.getDebugLoc();
   4092   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
   4093   assert(BVN != 0 && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
   4094 
   4095   // Check if this is a splat of a constant value.
   4096   APInt APSplatBits, APSplatUndef;
   4097   unsigned SplatBitSize;
   4098   bool HasAnyUndefs;
    4099   if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
   4100                              HasAnyUndefs, 0, true) || SplatBitSize > 32)
   4101     return SDValue();
   4102 
   4103   unsigned SplatBits = APSplatBits.getZExtValue();
   4104   unsigned SplatUndef = APSplatUndef.getZExtValue();
   4105   unsigned SplatSize = SplatBitSize / 8;
   4106 
   4107   // First, handle single instruction cases.
   4108 
   4109   // All zeros?
   4110   if (SplatBits == 0) {
   4111     // Canonicalize all zero vectors to be v4i32.
   4112     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
   4113       SDValue Z = DAG.getConstant(0, MVT::i32);
   4114       Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
   4115       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
   4116     }
   4117     return Op;
   4118   }
   4119 
   4120   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
    4121   int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
   4122                     (32-SplatBitSize));
   4123   if (SextVal >= -16 && SextVal <= 15)
   4124     return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
   4125 
   4126 
   4127   // Two instruction sequences.
   4128 
   4129   // If this value is in the range [-32,30] and is even, use:
   4130   //    tmp = VSPLTI[bhw], result = add tmp, tmp
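           // e.g. a splat of 20 is built as vsplti 10 followed by an add of
           // the result to itself.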
   4131   if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
   4132     SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl);
   4133     Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res);
   4134     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
   4135   }
   4136 
   4137   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
   4138   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
   4139   // for fneg/fabs.
   4140   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
   4141     // Make -1 and vspltisw -1:
   4142     SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
   4143 
   4144     // Make the VSLW intrinsic, computing 0x8000_0000.
   4145     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
   4146                                    OnesV, DAG, dl);
   4147 
   4148     // xor by OnesV to invert it.
   4149     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
   4150     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
   4151   }
   4152 
    4153   // Check to see if this is one of the many "vsplti + binop self" cases.
   4154   static const signed char SplatCsts[] = {
   4155     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
   4156     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
   4157   };
   4158 
   4159   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    4160     // Indirect through the SplatCsts array so that we favor 'vsplti -1' (the
    4161     // first entry) for cases which are ambiguous (e.g. forming 0x8000_0000).
   4162     int i = SplatCsts[idx];
   4163 
   4164     // Figure out what shift amount will be used by altivec if shifted by i in
   4165     // this splat size.
   4166     unsigned TypeShiftAmt = i & (SplatBitSize-1);
   4167 
   4168     // vsplti + shl self.
   4169     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
   4170       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
   4171       static const unsigned IIDs[] = { // Intrinsic to use for each size.
   4172         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
   4173         Intrinsic::ppc_altivec_vslw
   4174       };
   4175       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
   4176       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
   4177     }
   4178 
   4179     // vsplti + srl self.
   4180     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
   4181       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
   4182       static const unsigned IIDs[] = { // Intrinsic to use for each size.
   4183         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
   4184         Intrinsic::ppc_altivec_vsrw
   4185       };
   4186       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
   4187       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
   4188     }
   4189 
   4190     // vsplti + sra self.
    4191     if (SextVal == (i >> TypeShiftAmt)) {  // arithmetic shift, unlike srl
   4192       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
   4193       static const unsigned IIDs[] = { // Intrinsic to use for each size.
   4194         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
   4195         Intrinsic::ppc_altivec_vsraw
   4196       };
   4197       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
   4198       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
   4199     }
   4200 
   4201     // vsplti + rol self.
   4202     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
   4203                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
   4204       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
   4205       static const unsigned IIDs[] = { // Intrinsic to use for each size.
   4206         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
   4207         Intrinsic::ppc_altivec_vrlw
   4208       };
   4209       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
   4210       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
   4211     }
   4212 
   4213     // t = vsplti c, result = vsldoi t, t, 1
   4214     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
   4215       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
   4216       return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl);
   4217     }
   4218     // t = vsplti c, result = vsldoi t, t, 2
   4219     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
   4220       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
   4221       return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl);
   4222     }
   4223     // t = vsplti c, result = vsldoi t, t, 3
   4224     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
   4225       SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl);
   4226       return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl);
   4227     }
   4228   }
   4229 
   4230   // Three instruction sequences.
   4231 
   4232   // Odd, in range [17,31]:  (vsplti C)-(vsplti -16).
   4233   if (SextVal >= 0 && SextVal <= 31) {
   4234     SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl);
   4235     SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
   4236     LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS);
   4237     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS);
   4238   }
   4239   // Odd, in range [-31,-17]:  (vsplti C)+(vsplti -16).
   4240   if (SextVal >= -31 && SextVal <= 0) {
   4241     SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl);
   4242     SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
   4243     LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS);
   4244     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS);
   4245   }
   4246 
   4247   return SDValue();
   4248 }
   4249 
   4250 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
   4251 /// the specified operations to build the shuffle.
   4252 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
   4253                                       SDValue RHS, SelectionDAG &DAG,
   4254                                       DebugLoc dl) {
   4255   unsigned OpNum = (PFEntry >> 26) & 0x0F;
   4256   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
   4257   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
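           // Each PFEntry packs: cost in bits 31-30, the opcode below in bits
           // 29-26, and two 13-bit operand IDs.  An ID encodes four source
           // elements as base-9 digits (0-3 = LHS element, 4-7 = RHS element,
           // 8 = undef), which is why OP_COPY compares against (1*9+2)*9+3,
           // i.e. <0,1,2,3>.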
   4258 
   4259   enum {
   4260     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
   4261     OP_VMRGHW,
   4262     OP_VMRGLW,
   4263     OP_VSPLTISW0,
   4264     OP_VSPLTISW1,
   4265     OP_VSPLTISW2,
   4266     OP_VSPLTISW3,
   4267     OP_VSLDOI4,
   4268     OP_VSLDOI8,
   4269     OP_VSLDOI12
   4270   };
   4271 
   4272   if (OpNum == OP_COPY) {
   4273     if (LHSID == (1*9+2)*9+3) return LHS;
   4274     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
   4275     return RHS;
   4276   }
   4277 
   4278   SDValue OpLHS, OpRHS;
   4279   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
   4280   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
   4281 
   4282   int ShufIdxs[16];
   4283   switch (OpNum) {
   4284   default: llvm_unreachable("Unknown i32 permute!");
   4285   case OP_VMRGHW:
   4286     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
   4287     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
   4288     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
   4289     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
   4290     break;
   4291   case OP_VMRGLW:
   4292     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
   4293     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
   4294     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
   4295     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
   4296     break;
   4297   case OP_VSPLTISW0:
   4298     for (unsigned i = 0; i != 16; ++i)
   4299       ShufIdxs[i] = (i&3)+0;
   4300     break;
   4301   case OP_VSPLTISW1:
   4302     for (unsigned i = 0; i != 16; ++i)
   4303       ShufIdxs[i] = (i&3)+4;
   4304     break;
   4305   case OP_VSPLTISW2:
   4306     for (unsigned i = 0; i != 16; ++i)
   4307       ShufIdxs[i] = (i&3)+8;
   4308     break;
   4309   case OP_VSPLTISW3:
   4310     for (unsigned i = 0; i != 16; ++i)
   4311       ShufIdxs[i] = (i&3)+12;
   4312     break;
   4313   case OP_VSLDOI4:
   4314     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
   4315   case OP_VSLDOI8:
   4316     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
   4317   case OP_VSLDOI12:
   4318     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
   4319   }
   4320   EVT VT = OpLHS.getValueType();
   4321   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
   4322   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
   4323   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
   4324   return DAG.getNode(ISD::BITCAST, dl, VT, T);
   4325 }
   4326 
   4327 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
   4328 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
   4329 /// return the code it can be lowered into.  Worst case, it can always be
   4330 /// lowered into a vperm.
   4331 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
   4332                                                SelectionDAG &DAG) const {
   4333   DebugLoc dl = Op.getDebugLoc();
   4334   SDValue V1 = Op.getOperand(0);
   4335   SDValue V2 = Op.getOperand(1);
   4336   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
   4337   EVT VT = Op.getValueType();
   4338 
   4339   // Cases that are handled by instructions that take permute immediates
   4340   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
   4341   // selected by the instruction selector.
   4342   if (V2.getOpcode() == ISD::UNDEF) {
   4343     if (PPC::isSplatShuffleMask(SVOp, 1) ||
   4344         PPC::isSplatShuffleMask(SVOp, 2) ||
   4345         PPC::isSplatShuffleMask(SVOp, 4) ||
   4346         PPC::isVPKUWUMShuffleMask(SVOp, true) ||
   4347         PPC::isVPKUHUMShuffleMask(SVOp, true) ||
   4348         PPC::isVSLDOIShuffleMask(SVOp, true) != -1 ||
   4349         PPC::isVMRGLShuffleMask(SVOp, 1, true) ||
   4350         PPC::isVMRGLShuffleMask(SVOp, 2, true) ||
   4351         PPC::isVMRGLShuffleMask(SVOp, 4, true) ||
   4352         PPC::isVMRGHShuffleMask(SVOp, 1, true) ||
   4353         PPC::isVMRGHShuffleMask(SVOp, 2, true) ||
   4354         PPC::isVMRGHShuffleMask(SVOp, 4, true)) {
   4355       return Op;
   4356     }
   4357   }
   4358 
   4359   // Altivec has a variety of "shuffle immediates" that take two vector inputs
   4360   // and produce a fixed permutation.  If any of these match, do not lower to
   4361   // VPERM.
   4362   if (PPC::isVPKUWUMShuffleMask(SVOp, false) ||
   4363       PPC::isVPKUHUMShuffleMask(SVOp, false) ||
   4364       PPC::isVSLDOIShuffleMask(SVOp, false) != -1 ||
   4365       PPC::isVMRGLShuffleMask(SVOp, 1, false) ||
   4366       PPC::isVMRGLShuffleMask(SVOp, 2, false) ||
   4367       PPC::isVMRGLShuffleMask(SVOp, 4, false) ||
   4368       PPC::isVMRGHShuffleMask(SVOp, 1, false) ||
   4369       PPC::isVMRGHShuffleMask(SVOp, 2, false) ||
   4370       PPC::isVMRGHShuffleMask(SVOp, 4, false))
   4371     return Op;
   4372 
   4373   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
   4374   // perfect shuffle table to emit an optimal matching sequence.
   4375   ArrayRef<int> PermMask = SVOp->getMask();
   4376 
   4377   unsigned PFIndexes[4];
   4378   bool isFourElementShuffle = true;
   4379   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
   4380     unsigned EltNo = 8;   // Start out undef.
   4381     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
   4382       if (PermMask[i*4+j] < 0)
   4383         continue;   // Undef, ignore it.
   4384 
   4385       unsigned ByteSource = PermMask[i*4+j];
   4386       if ((ByteSource & 3) != j) {
   4387         isFourElementShuffle = false;
   4388         break;
   4389       }
   4390 
   4391       if (EltNo == 8) {
   4392         EltNo = ByteSource/4;
   4393       } else if (EltNo != ByteSource/4) {
   4394         isFourElementShuffle = false;
   4395         break;
   4396       }
   4397     }
   4398     PFIndexes[i] = EltNo;
   4399   }
   4400 
   4401   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
   4402   // perfect shuffle vector to determine if it is cost effective to do this as
   4403   // discrete instructions, or whether we should use a vperm.
   4404   if (isFourElementShuffle) {
   4405     // Compute the index in the perfect shuffle table.
   4406     unsigned PFTableIndex =
   4407       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
   4408 
   4409     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
   4410     unsigned Cost  = (PFEntry >> 30);
   4411 
   4412     // Determining when to avoid vperm is tricky.  Many things affect the cost
   4413     // of vperm, particularly how many times the perm mask needs to be computed.
   4414     // For example, if the perm mask can be hoisted out of a loop or is already
   4415     // used (perhaps because there are multiple permutes with the same shuffle
   4416     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
   4417     // the loop requires an extra register.
   4418     //
   4419     // As a compromise, we only emit discrete instructions if the shuffle can be
   4420     // generated in 3 or fewer operations.  When we have loop information
   4421     // available, if this block is within a loop, we should avoid using vperm
   4422     // for 3-operation perms and use a constant pool load instead.
   4423     if (Cost < 3)
   4424       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
   4425   }
   4426 
   4427   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
   4428   // vector that will get spilled to the constant pool.
   4429   if (V2.getOpcode() == ISD::UNDEF) V2 = V1;
   4430 
   4431   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
   4432   // that it is in input element units, not in bytes.  Convert now.
   4433   EVT EltVT = V1.getValueType().getVectorElementType();
   4434   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
   4435 
   4436   SmallVector<SDValue, 16> ResultMask;
   4437   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
   4438     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
   4439 
   4440     for (unsigned j = 0; j != BytesPerElement; ++j)
   4441       ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
   4442                                            MVT::i32));
   4443   }
   4444 
   4445   SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8,
   4446                                     &ResultMask[0], ResultMask.size());
   4447   return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(), V1, V2, VPermMask);
   4448 }
   4449 
   4450 /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an
   4451 /// altivec comparison.  If it is, return true and fill in Opc/isDot with
   4452 /// information about the intrinsic.
   4453 static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc,
   4454                                   bool &isDot) {
   4455   unsigned IntrinsicID =
   4456     cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
   4457   CompareOpc = -1;
   4458   isDot = false;
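           // The CompareOpc values below are the VX-form extended opcodes of
           // the corresponding vcmp* instructions; the "_p" predicate
           // intrinsics select the record (dot) forms, which also set CR6.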
   4459   switch (IntrinsicID) {
   4460   default: return false;
   4461     // Comparison predicates.
   4462   case Intrinsic::ppc_altivec_vcmpbfp_p:  CompareOpc = 966; isDot = 1; break;
   4463   case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1; break;
   4464   case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc =   6; isDot = 1; break;
   4465   case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc =  70; isDot = 1; break;
   4466   case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1; break;
   4467   case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1; break;
   4468   case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1; break;
   4469   case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1; break;
   4470   case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1; break;
   4471   case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1; break;
   4472   case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1; break;
   4473   case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1; break;
   4474   case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1; break;
   4475 
   4476     // Normal Comparisons.
   4477   case Intrinsic::ppc_altivec_vcmpbfp:    CompareOpc = 966; isDot = 0; break;
   4478   case Intrinsic::ppc_altivec_vcmpeqfp:   CompareOpc = 198; isDot = 0; break;
   4479   case Intrinsic::ppc_altivec_vcmpequb:   CompareOpc =   6; isDot = 0; break;
   4480   case Intrinsic::ppc_altivec_vcmpequh:   CompareOpc =  70; isDot = 0; break;
   4481   case Intrinsic::ppc_altivec_vcmpequw:   CompareOpc = 134; isDot = 0; break;
   4482   case Intrinsic::ppc_altivec_vcmpgefp:   CompareOpc = 454; isDot = 0; break;
   4483   case Intrinsic::ppc_altivec_vcmpgtfp:   CompareOpc = 710; isDot = 0; break;
   4484   case Intrinsic::ppc_altivec_vcmpgtsb:   CompareOpc = 774; isDot = 0; break;
   4485   case Intrinsic::ppc_altivec_vcmpgtsh:   CompareOpc = 838; isDot = 0; break;
   4486   case Intrinsic::ppc_altivec_vcmpgtsw:   CompareOpc = 902; isDot = 0; break;
   4487   case Intrinsic::ppc_altivec_vcmpgtub:   CompareOpc = 518; isDot = 0; break;
   4488   case Intrinsic::ppc_altivec_vcmpgtuh:   CompareOpc = 582; isDot = 0; break;
   4489   case Intrinsic::ppc_altivec_vcmpgtuw:   CompareOpc = 646; isDot = 0; break;
   4490   }
   4491   return true;
   4492 }
   4493 
   4494 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
   4495 /// lower, do it, otherwise return null.
   4496 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   4497                                                    SelectionDAG &DAG) const {
   4498   // If this is a lowered altivec predicate compare, CompareOpc is set to the
   4499   // opcode number of the comparison.
   4500   DebugLoc dl = Op.getDebugLoc();
   4501   int CompareOpc;
   4502   bool isDot;
   4503   if (!getAltivecCompareInfo(Op, CompareOpc, isDot))
   4504     return SDValue();    // Don't custom lower most intrinsics.
   4505 
   4506   // If this is a non-dot comparison, make the VCMP node and we are done.
   4507   if (!isDot) {
   4508     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
   4509                               Op.getOperand(1), Op.getOperand(2),
   4510                               DAG.getConstant(CompareOpc, MVT::i32));
   4511     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
   4512   }
   4513 
   4514   // Create the PPCISD altivec 'dot' comparison node.
   4515   SDValue Ops[] = {
   4516     Op.getOperand(2),  // LHS
   4517     Op.getOperand(3),  // RHS
   4518     DAG.getConstant(CompareOpc, MVT::i32)
   4519   };
   4520   std::vector<EVT> VTs;
   4521   VTs.push_back(Op.getOperand(2).getValueType());
   4522   VTs.push_back(MVT::Glue);
   4523   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
   4524 
   4525   // Now that we have the comparison, emit a copy from the CR to a GPR.
   4526   // This is flagged to the above dot comparison.
   4527   SDValue Flags = DAG.getNode(PPCISD::MFCR, dl, MVT::i32,
   4528                                 DAG.getRegister(PPC::CR6, MVT::i32),
   4529                                 CompNode.getValue(1));
   4530 
   4531   // Unpack the result based on how the target uses it.
   4532   unsigned BitNo;   // Bit # of CR6.
   4533   bool InvertBit;   // Invert result?
   4534   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
   4535   default:  // Can't happen, don't crash on invalid number though.
   4536   case 0:   // Return the value of the EQ bit of CR6.
   4537     BitNo = 0; InvertBit = false;
   4538     break;
   4539   case 1:   // Return the inverted value of the EQ bit of CR6.
   4540     BitNo = 0; InvertBit = true;
   4541     break;
   4542   case 2:   // Return the value of the LT bit of CR6.
   4543     BitNo = 2; InvertBit = false;
   4544     break;
   4545   case 3:   // Return the inverted value of the LT bit of CR6.
   4546     BitNo = 2; InvertBit = true;
   4547     break;
   4548   }
   4549 
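           // MFCR returns the full 32-bit CR; CR6 occupies bits 7..4 counting
           // from the LSB, with LT at 7, GT at 6, EQ at 5, and SO at 4.  The
           // shift amount 8-(3-BitNo) == 5+BitNo therefore moves EQ (BitNo=0)
           // or LT (BitNo=2) into bit 0.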
   4550   // Shift the bit into the low position.
   4551   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
   4552                       DAG.getConstant(8-(3-BitNo), MVT::i32));
   4553   // Isolate the bit.
   4554   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
   4555                       DAG.getConstant(1, MVT::i32));
   4556 
   4557   // If we are supposed to, toggle the bit.
   4558   if (InvertBit)
   4559     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
   4560                         DAG.getConstant(1, MVT::i32));
   4561   return Flags;
   4562 }
   4563 
   4564 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
   4565                                                    SelectionDAG &DAG) const {
   4566   DebugLoc dl = Op.getDebugLoc();
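          // AltiVec provides no direct move between GPRs/FPRs and vector
          // registers, so bounce the scalar through memory: store it to an
          // aligned stack slot and reload the slot as a vector (only element 0
          // of the result is defined).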
   4567   // Create a stack slot that is 16-byte aligned.
   4568   MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
   4569   int FrameIdx = FrameInfo->CreateStackObject(16, 16, false);
   4570   EVT PtrVT = getPointerTy();
   4571   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
   4572 
   4573   // Store the input value into Value#0 of the stack slot.
   4574   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
   4575                                Op.getOperand(0), FIdx, MachinePointerInfo(),
   4576                                false, false, 0);
   4577   // Load it out.
   4578   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo(),
   4579                      false, false, false, 0);
   4580 }
   4581 
   4582 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
   4583   DebugLoc dl = Op.getDebugLoc();
   4584   if (Op.getValueType() == MVT::v4i32) {
   4585     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
   4586 
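            // Decompose each 32-bit lane as x = xh*2^16 + xl, y = yh*2^16 + yl;
            // then x*y mod 2^32 = xl*yl + ((xh*yl + xl*yh) << 16).  vmulouh
            // produces the xl*yl terms, and vmsumuhm on (x, y-rotated-by-16)
            // produces xh*yl + xl*yh, which is shifted up 16 and added below.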
   4587     SDValue Zero  = BuildSplatI(  0, 1, MVT::v4i32, DAG, dl);
   4588     SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl); // +16 as shift amt.
   4589 
   4590     SDValue RHSSwap =   // = vrlw RHS, 16
   4591       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
   4592 
   4593     // Shrinkify inputs to v8i16.
   4594     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
   4595     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
   4596     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
   4597 
   4598     // Low parts multiplied together, generating 32-bit results (we ignore the
   4599     // top parts).
   4600     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
   4601                                         LHS, RHS, DAG, dl, MVT::v4i32);
   4602 
   4603     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
   4604                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
   4605     // Shift the high parts up 16 bits.
   4606     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
   4607                               Neg16, DAG, dl);
   4608     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
   4609   } else if (Op.getValueType() == MVT::v8i16) {
   4610     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
   4611 
   4612     SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl);
   4613 
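            // vmladduhm computes (LHS * RHS + Zero) modulo 2^16 per element,
            // which with a zero addend is exactly a v8i16 multiply.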
   4614     return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm,
   4615                             LHS, RHS, Zero, DAG, dl);
   4616   } else if (Op.getValueType() == MVT::v16i8) {
   4617     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
   4618 
   4619     // Multiply the even 8-bit parts, producing 16-bit sums.
   4620     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
   4621                                            LHS, RHS, DAG, dl, MVT::v8i16);
   4622     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
   4623 
   4624     // Multiply the odd 8-bit parts, producing 16-bit sums.
   4625     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
   4626                                           LHS, RHS, DAG, dl, MVT::v8i16);
   4627     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
   4628 
   4629     // Merge the results together.
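            // On big-endian AltiVec the low byte of each 16-bit product lands
            // at the odd byte index, so take byte 2*i+1 of EvenParts for the
            // even result bytes and byte 2*i+1 of OddParts (indices 16-31 name
            // the second shuffle operand) for the odd ones.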
   4630     int Ops[16];
   4631     for (unsigned i = 0; i != 8; ++i) {
   4632       Ops[i*2  ] = 2*i+1;
   4633       Ops[i*2+1] = 2*i+1+16;
   4634     }
   4635     return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
   4636   } else {
   4637     llvm_unreachable("Unknown mul to lower!");
   4638   }
   4639 }
   4640 
   4641 /// LowerOperation - Provide custom lowering hooks for some operations.
   4642 ///
   4643 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
   4644   switch (Op.getOpcode()) {
   4645   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
   4646   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
   4647   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
   4648   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
   4649   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
   4650   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
   4651   case ISD::SETCC:              return LowerSETCC(Op, DAG);
   4652   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
   4653   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
   4654   case ISD::VASTART:
   4655     return LowerVASTART(Op, DAG, PPCSubTarget);
   4656 
   4657   case ISD::VAARG:
   4658     return LowerVAARG(Op, DAG, PPCSubTarget);
   4659 
   4660   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG, PPCSubTarget);
   4661   case ISD::DYNAMIC_STACKALLOC:
   4662     return LowerDYNAMIC_STACKALLOC(Op, DAG, PPCSubTarget);
   4663 
   4664   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
   4665   case ISD::FP_TO_UINT:
   4666   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG,
   4667                                                        Op.getDebugLoc());
   4668   case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
   4669   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
   4670 
   4671   // Lower 64-bit shifts.
   4672   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
   4673   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
   4674   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
   4675 
   4676   // Vector-related lowering.
   4677   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
   4678   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
   4679   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
   4680   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
   4681   case ISD::MUL:                return LowerMUL(Op, DAG);
   4682 
   4683   // Frame & Return address.
   4684   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
   4685   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
   4686   }
   4687 }
   4688 
   4689 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
   4690                                            SmallVectorImpl<SDValue>&Results,
   4691                                            SelectionDAG &DAG) const {
   4692   const TargetMachine &TM = getTargetMachine();
   4693   DebugLoc dl = N->getDebugLoc();
   4694   switch (N->getOpcode()) {
   4695   default:
   4696     llvm_unreachable("Do not know how to custom type legalize this operation!");
   4697   case ISD::VAARG: {
   4698     if (!TM.getSubtarget<PPCSubtarget>().isSVR4ABI()
   4699         || TM.getSubtarget<PPCSubtarget>().isPPC64())
   4700       return;
   4701 
   4702     EVT VT = N->getValueType(0);
   4703 
   4704     if (VT == MVT::i64) {
   4705       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG, PPCSubTarget);
   4706 
   4707       Results.push_back(NewNode);
   4708       Results.push_back(NewNode.getValue(1));
   4709     }
   4710     return;
   4711   }
   4712   case ISD::FP_ROUND_INREG: {
   4713     assert(N->getValueType(0) == MVT::ppcf128);
   4714     assert(N->getOperand(0).getValueType() == MVT::ppcf128);
   4715     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
   4716                              MVT::f64, N->getOperand(0),
   4717                              DAG.getIntPtrConstant(0));
   4718     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
   4719                              MVT::f64, N->getOperand(0),
   4720                              DAG.getIntPtrConstant(1));
   4721 
   4722     // This sequence changes FPSCR to do round-to-zero, adds the two halves
   4723     // of the long double, and puts FPSCR back the way it was.  We do not
   4724     // actually model FPSCR.
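            // FPSCR bits 30-31 are the rounding-mode field RN; the mtfsb1 31 /
            // mtfsb0 30 pair below sets RN to 0b01 (round toward zero), and the
            // final mtfsf restores the FPSCR image saved by mffs.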
   4725     std::vector<EVT> NodeTys;
   4726     SDValue Ops[4], Result, MFFSreg, InFlag, FPreg;
   4727 
   4728     NodeTys.push_back(MVT::f64);   // Return register
   4729     NodeTys.push_back(MVT::Glue);    // Returns a flag for later insns
   4730     Result = DAG.getNode(PPCISD::MFFS, dl, NodeTys, &InFlag, 0);
   4731     MFFSreg = Result.getValue(0);
   4732     InFlag = Result.getValue(1);
   4733 
   4734     NodeTys.clear();
   4735     NodeTys.push_back(MVT::Glue);   // Returns a flag
   4736     Ops[0] = DAG.getConstant(31, MVT::i32);
   4737     Ops[1] = InFlag;
   4738     Result = DAG.getNode(PPCISD::MTFSB1, dl, NodeTys, Ops, 2);
   4739     InFlag = Result.getValue(0);
   4740 
   4741     NodeTys.clear();
   4742     NodeTys.push_back(MVT::Glue);   // Returns a flag
   4743     Ops[0] = DAG.getConstant(30, MVT::i32);
   4744     Ops[1] = InFlag;
   4745     Result = DAG.getNode(PPCISD::MTFSB0, dl, NodeTys, Ops, 2);
   4746     InFlag = Result.getValue(0);
   4747 
   4748     NodeTys.clear();
   4749     NodeTys.push_back(MVT::f64);    // result of add
   4750     NodeTys.push_back(MVT::Glue);   // Returns a flag
   4751     Ops[0] = Lo;
   4752     Ops[1] = Hi;
   4753     Ops[2] = InFlag;
   4754     Result = DAG.getNode(PPCISD::FADDRTZ, dl, NodeTys, Ops, 3);
   4755     FPreg = Result.getValue(0);
   4756     InFlag = Result.getValue(1);
   4757 
   4758     NodeTys.clear();
   4759     NodeTys.push_back(MVT::f64);
   4760     Ops[0] = DAG.getConstant(1, MVT::i32);
   4761     Ops[1] = MFFSreg;
   4762     Ops[2] = FPreg;
   4763     Ops[3] = InFlag;
   4764     Result = DAG.getNode(PPCISD::MTFSF, dl, NodeTys, Ops, 4);
   4765     FPreg = Result.getValue(0);
   4766 
   4767     // We know the low half is about to be thrown away, so just use something
   4768     // convenient.
   4769     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::ppcf128,
   4770                                 FPreg, FPreg));
   4771     return;
   4772   }
   4773   case ISD::FP_TO_SINT:
   4774     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
   4775     return;
   4776   }
   4777 }
   4778 
   4779 
   4780 //===----------------------------------------------------------------------===//
   4781 //  Other Lowering Code
   4782 //===----------------------------------------------------------------------===//
   4783 
   4784 MachineBasicBlock *
   4785 PPCTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
   4786                                     bool is64bit, unsigned BinOpcode) const {
   4787   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
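          // The loop below relies on PPC's load-reserve/store-conditional pair:
          // l[wd]arx establishes a reservation on the address, and st[wd]cx.
          // succeeds (setting CR0.EQ) only if the reservation is still held, so
          // the bne- retries whenever another store intervened.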
   4788   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
   4789 
   4790   const BasicBlock *LLVM_BB = BB->getBasicBlock();
   4791   MachineFunction *F = BB->getParent();
   4792   MachineFunction::iterator It = BB;
   4793   ++It;
   4794 
   4795   unsigned dest = MI->getOperand(0).getReg();
   4796   unsigned ptrA = MI->getOperand(1).getReg();
   4797   unsigned ptrB = MI->getOperand(2).getReg();
   4798   unsigned incr = MI->getOperand(3).getReg();
   4799   DebugLoc dl = MI->getDebugLoc();
   4800 
   4801   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
   4802   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
   4803   F->insert(It, loopMBB);
   4804   F->insert(It, exitMBB);
   4805   exitMBB->splice(exitMBB->begin(), BB,
   4806                   llvm::next(MachineBasicBlock::iterator(MI)),
   4807                   BB->end());
   4808   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
   4809 
   4810   MachineRegisterInfo &RegInfo = F->getRegInfo();
   4811   unsigned TmpReg = (!BinOpcode) ? incr :
   4812     RegInfo.createVirtualRegister(
   4813        is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
   4814                  (const TargetRegisterClass *) &PPC::GPRCRegClass);
   4815 
   4816   //  thisMBB:
   4817   //   ...
   4818   //   fallthrough --> loopMBB
   4819   BB->addSuccessor(loopMBB);
   4820 
   4821   //  loopMBB:
   4822   //   l[wd]arx dest, ptr
   4823   //   add r0, dest, incr
   4824   //   st[wd]cx. r0, ptr
   4825   //   bne- loopMBB
   4826   //   fallthrough --> exitMBB
   4827   BB = loopMBB;
   4828   BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
   4829     .addReg(ptrA).addReg(ptrB);
   4830   if (BinOpcode)
   4831     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
   4832   BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
   4833     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
   4834   BuildMI(BB, dl, TII->get(PPC::BCC))
   4835     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
   4836   BB->addSuccessor(loopMBB);
   4837   BB->addSuccessor(exitMBB);
   4838 
   4839   //  exitMBB:
   4840   //   ...
   4841   BB = exitMBB;
   4842   return BB;
   4843 }
   4844 
   4845 MachineBasicBlock *
   4846 PPCTargetLowering::EmitPartwordAtomicBinary(MachineInstr *MI,
   4847                                             MachineBasicBlock *BB,
   4848                                             bool is8bit,    // operation
   4849                                             unsigned BinOpcode) const {
   4850   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
   4851   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
   4852   // In 64-bit mode we have to use 64-bit registers for addresses, even
   4853   // though lwarx/stwcx. operate on 32-bit words.  The 32-bit atomics can
   4854   // use address registers without caring whether they are 32 or 64 bits,
   4855   // but here we do actual arithmetic on the addresses.
   4856   bool is64bit = PPCSubTarget.isPPC64();
   4857   unsigned ZeroReg = is64bit ? PPC::X0 : PPC::R0;
   4858 
   4859   const BasicBlock *LLVM_BB = BB->getBasicBlock();
   4860   MachineFunction *F = BB->getParent();
   4861   MachineFunction::iterator It = BB;
   4862   ++It;
   4863 
   4864   unsigned dest = MI->getOperand(0).getReg();
   4865   unsigned ptrA = MI->getOperand(1).getReg();
   4866   unsigned ptrB = MI->getOperand(2).getReg();
   4867   unsigned incr = MI->getOperand(3).getReg();
   4868   DebugLoc dl = MI->getDebugLoc();
   4869 
   4870   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
   4871   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
   4872   F->insert(It, loopMBB);
   4873   F->insert(It, exitMBB);
   4874   exitMBB->splice(exitMBB->begin(), BB,
   4875                   llvm::next(MachineBasicBlock::iterator(MI)),
   4876                   BB->end());
   4877   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
   4878 
   4879   MachineRegisterInfo &RegInfo = F->getRegInfo();
   4880   const TargetRegisterClass *RC =
   4881     is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
   4882               (const TargetRegisterClass *) &PPC::GPRCRegClass;
   4883   unsigned PtrReg = RegInfo.createVirtualRegister(RC);
   4884   unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
   4885   unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
   4886   unsigned Incr2Reg = RegInfo.createVirtualRegister(RC);
   4887   unsigned MaskReg = RegInfo.createVirtualRegister(RC);
   4888   unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
   4889   unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
   4890   unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
   4891   unsigned Tmp3Reg = RegInfo.createVirtualRegister(RC);
   4892   unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
   4893   unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
   4894   unsigned Ptr1Reg;
   4895   unsigned TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(RC);
   4896 
   4897   //  thisMBB:
   4898   //   ...
   4899   //   fallthrough --> loopMBB
   4900   BB->addSuccessor(loopMBB);
   4901 
   4902   // The 4-byte load must be aligned, while a char or short may be
   4903   // anywhere in the word.  Hence all this nasty bookkeeping code.
   4904   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
   4905   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
   4906   //   xori shift, shift1, 24 [16]
   4907   //   rlwinm ptr, ptr1, 0, 0, 29
   4908   //   slw incr2, incr, shift
   4909   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
   4910   //   slw mask, mask2, shift
   4911   //  loopMBB:
   4912   //   lwarx tmpDest, ptr
   4913   //   add tmp, tmpDest, incr2
   4914   //   andc tmp2, tmpDest, mask
   4915   //   and tmp3, tmp, mask
   4916   //   or tmp4, tmp3, tmp2
   4917   //   stwcx. tmp4, ptr
   4918   //   bne- loopMBB
   4919   //   fallthrough --> exitMBB
   4920   //   srw dest, tmpDest, shift
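          // Worked example (big-endian, is8bit): for a byte at offset 2 in its
          // word, rlwinm leaves (ptr1 & 3) << 3 = 16 in shift1, and xori 24
          // flips that to shift = 8 = 24 - 8*offset, the distance the byte must
          // be shifted left to reach its lane in the aligned word.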
   4921   if (ptrA != ZeroReg) {
   4922     Ptr1Reg = RegInfo.createVirtualRegister(RC);
   4923     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
   4924       .addReg(ptrA).addReg(ptrB);
   4925   } else {
   4926     Ptr1Reg = ptrB;
   4927   }
   4928   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
   4929       .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
   4930   BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
   4931       .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
   4932   if (is64bit)
   4933     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
   4934       .addReg(Ptr1Reg).addImm(0).addImm(61);
   4935   else
   4936     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
   4937       .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
   4938   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg)
   4939       .addReg(incr).addReg(ShiftReg);
   4940   if (is8bit)
   4941     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
   4942   else {
   4943     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
   4944     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg).addReg(Mask3Reg).addImm(65535);
   4945   }
   4946   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
   4947       .addReg(Mask2Reg).addReg(ShiftReg);
   4948 
   4949   BB = loopMBB;
   4950   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
   4951     .addReg(ZeroReg).addReg(PtrReg);
   4952   if (BinOpcode)
   4953     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
   4954       .addReg(Incr2Reg).addReg(TmpDestReg);
   4955   BuildMI(BB, dl, TII->get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
   4956     .addReg(TmpDestReg).addReg(MaskReg);
   4957   BuildMI(BB, dl, TII->get(is64bit ? PPC::AND8 : PPC::AND), Tmp3Reg)
   4958     .addReg(TmpReg).addReg(MaskReg);
   4959   BuildMI(BB, dl, TII->get(is64bit ? PPC::OR8 : PPC::OR), Tmp4Reg)
   4960     .addReg(Tmp3Reg).addReg(Tmp2Reg);
   4961   BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
   4962     .addReg(Tmp4Reg).addReg(ZeroReg).addReg(PtrReg);
   4963   BuildMI(BB, dl, TII->get(PPC::BCC))
   4964     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
   4965   BB->addSuccessor(loopMBB);
   4966   BB->addSuccessor(exitMBB);
   4967 
   4968   //  exitMBB:
   4969   //   ...
   4970   BB = exitMBB;
   4971   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpDestReg)
   4972     .addReg(ShiftReg);
   4973   return BB;
   4974 }
   4975 
   4976 MachineBasicBlock *
   4977 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   4978                                                MachineBasicBlock *BB) const {
   4979   const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
   4980 
   4981   // To "insert" these instructions we actually have to insert their
   4982   // control-flow patterns.
   4983   const BasicBlock *LLVM_BB = BB->getBasicBlock();
   4984   MachineFunction::iterator It = BB;
   4985   ++It;
   4986 
   4987   MachineFunction *F = BB->getParent();
   4988 
   4989   if (PPCSubTarget.hasISEL() && (MI->getOpcode() == PPC::SELECT_CC_I4 ||
   4990                                  MI->getOpcode() == PPC::SELECT_CC_I8)) {
   4991     unsigned OpCode = MI->getOpcode() == PPC::SELECT_CC_I8 ?
   4992                                          PPC::ISEL8 : PPC::ISEL;
   4993     unsigned SelectPred = MI->getOperand(4).getImm();
   4994     DebugLoc dl = MI->getDebugLoc();
   4995 
   4996     // The SelectPred is ((BI << 5) | BO) for a BCC
   4997     unsigned BO = SelectPred & 0xF;
   4998     assert((BO == 12 || BO == 4) && "invalid predicate BO field for isel");
   4999 
   5000     unsigned TrueOpNo, FalseOpNo;
   5001     if (BO == 12) {
   5002       TrueOpNo = 2;
   5003       FalseOpNo = 3;
   5004     } else {
   5005       TrueOpNo = 3;
   5006       FalseOpNo = 2;
   5007       SelectPred = PPC::InvertPredicate((PPC::Predicate)SelectPred);
   5008     }
   5009 
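            // isel rD, rA, rB, crbit copies rA when the CR bit is set and rB
            // otherwise.  BO==12 branches when the bit is set, so the operands
            // map directly; BO==4 branches when it is clear, hence the swap and
            // the inverted predicate above.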
   5010     BuildMI(*BB, MI, dl, TII->get(OpCode), MI->getOperand(0).getReg())
   5011       .addReg(MI->getOperand(TrueOpNo).getReg())
   5012       .addReg(MI->getOperand(FalseOpNo).getReg())
   5013       .addImm(SelectPred).addReg(MI->getOperand(1).getReg());
   5014   } else if (MI->getOpcode() == PPC::SELECT_CC_I4 ||
   5015              MI->getOpcode() == PPC::SELECT_CC_I8 ||
   5016              MI->getOpcode() == PPC::SELECT_CC_F4 ||
   5017              MI->getOpcode() == PPC::SELECT_CC_F8 ||
   5018              MI->getOpcode() == PPC::SELECT_CC_VRRC) {
   5019 
   5020 
   5021     // The incoming instruction knows the destination vreg to set, the
   5022     // condition code register to branch on, the true/false values to
   5023     // select between, and a branch opcode to use.
   5024 
   5025     //  thisMBB:
   5026     //  ...
   5027     //   TrueVal = ...
   5028     //   cmpTY ccX, r1, r2
   5029     //   bCC copy1MBB
   5030     //   fallthrough --> copy0MBB
   5031     MachineBasicBlock *thisMBB = BB;
   5032     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
   5033     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
   5034     unsigned SelectPred = MI->getOperand(4).getImm();
   5035     DebugLoc dl = MI->getDebugLoc();
   5036     F->insert(It, copy0MBB);
   5037     F->insert(It, sinkMBB);
   5038 
   5039     // Transfer the remainder of BB and its successor edges to sinkMBB.
   5040     sinkMBB->splice(sinkMBB->begin(), BB,
   5041                     llvm::next(MachineBasicBlock::iterator(MI)),
   5042                     BB->end());
   5043     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
   5044 
   5045     // Next, add the true and fallthrough blocks as its successors.
   5046     BB->addSuccessor(copy0MBB);
   5047     BB->addSuccessor(sinkMBB);
   5048 
   5049     BuildMI(BB, dl, TII->get(PPC::BCC))
   5050       .addImm(SelectPred).addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
   5051 
   5052     //  copy0MBB:
   5053     //   %FalseValue = ...
   5054     //   # fallthrough to sinkMBB
   5055     BB = copy0MBB;
   5056 
   5057     // Update machine-CFG edges
   5058     BB->addSuccessor(sinkMBB);
   5059 
   5060     //  sinkMBB:
   5061     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
   5062     //  ...
   5063     BB = sinkMBB;
   5064     BuildMI(*BB, BB->begin(), dl,
   5065             TII->get(PPC::PHI), MI->getOperand(0).getReg())
   5066       .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
   5067       .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
   5068   }
   5069   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
   5070     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
   5071   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
   5072     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
   5073   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
   5074     BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4);
   5075   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
   5076     BB = EmitAtomicBinary(MI, BB, true, PPC::ADD8);
   5077 
   5078   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
   5079     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
   5080   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
   5081     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
   5082   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
   5083     BB = EmitAtomicBinary(MI, BB, false, PPC::AND);
   5084   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
   5085     BB = EmitAtomicBinary(MI, BB, true, PPC::AND8);
   5086 
   5087   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
   5088     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
   5089   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
   5090     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
   5091   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
   5092     BB = EmitAtomicBinary(MI, BB, false, PPC::OR);
   5093   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
   5094     BB = EmitAtomicBinary(MI, BB, true, PPC::OR8);
   5095 
   5096   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
   5097     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
   5098   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
   5099     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
   5100   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
   5101     BB = EmitAtomicBinary(MI, BB, false, PPC::XOR);
   5102   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
   5103     BB = EmitAtomicBinary(MI, BB, true, PPC::XOR8);
   5104 
          // atomicrmw nand computes ~(old & val); andc (a & ~b) is not the
          // same operation, so emit the real nand instruction instead.
   5105   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
   5106     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
   5107   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
   5108     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
   5109   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
   5110     BB = EmitAtomicBinary(MI, BB, false, PPC::NAND);
   5111   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
   5112     BB = EmitAtomicBinary(MI, BB, true, PPC::NAND8);
   5113 
   5114   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
   5115     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
   5116   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
   5117     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
   5118   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
   5119     BB = EmitAtomicBinary(MI, BB, false, PPC::SUBF);
   5120   else if (MI->getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
   5121     BB = EmitAtomicBinary(MI, BB, true, PPC::SUBF8);
   5122 
   5123   else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I8)
   5124     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
   5125   else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I16)
   5126     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
   5127   else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I32)
   5128     BB = EmitAtomicBinary(MI, BB, false, 0);
   5129   else if (MI->getOpcode() == PPC::ATOMIC_SWAP_I64)
   5130     BB = EmitAtomicBinary(MI, BB, true, 0);
   5131 
   5132   else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
   5133            MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64) {
   5134     bool is64bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
   5135 
   5136     unsigned dest   = MI->getOperand(0).getReg();
   5137     unsigned ptrA   = MI->getOperand(1).getReg();
   5138     unsigned ptrB   = MI->getOperand(2).getReg();
   5139     unsigned oldval = MI->getOperand(3).getReg();
   5140     unsigned newval = MI->getOperand(4).getReg();
   5141     DebugLoc dl     = MI->getDebugLoc();
   5142 
   5143     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
   5144     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
   5145     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
   5146     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
   5147     F->insert(It, loop1MBB);
   5148     F->insert(It, loop2MBB);
   5149     F->insert(It, midMBB);
   5150     F->insert(It, exitMBB);
   5151     exitMBB->splice(exitMBB->begin(), BB,
   5152                     llvm::next(MachineBasicBlock::iterator(MI)),
   5153                     BB->end());
   5154     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
   5155 
   5156     //  thisMBB:
   5157     //   ...
   5158     //   fallthrough --> loopMBB
   5159     BB->addSuccessor(loop1MBB);
   5160 
   5161     // loop1MBB:
   5162     //   l[wd]arx dest, ptr
   5163     //   cmp[wd] dest, oldval
   5164     //   bne- midMBB
   5165     // loop2MBB:
   5166     //   st[wd]cx. newval, ptr
   5167     //   bne- loopMBB
   5168     //   b exitBB
   5169     // midMBB:
   5170     //   st[wd]cx. dest, ptr
   5171     // exitBB:
   5172     BB = loop1MBB;
   5173     BuildMI(BB, dl, TII->get(is64bit ? PPC::LDARX : PPC::LWARX), dest)
   5174       .addReg(ptrA).addReg(ptrB);
   5175     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
   5176       .addReg(oldval).addReg(dest);
   5177     BuildMI(BB, dl, TII->get(PPC::BCC))
   5178       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
   5179     BB->addSuccessor(loop2MBB);
   5180     BB->addSuccessor(midMBB);
   5181 
   5182     BB = loop2MBB;
   5183     BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
   5184       .addReg(newval).addReg(ptrA).addReg(ptrB);
   5185     BuildMI(BB, dl, TII->get(PPC::BCC))
   5186       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
   5187     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
   5188     BB->addSuccessor(loop1MBB);
   5189     BB->addSuccessor(exitMBB);
   5190 
   5191     BB = midMBB;
   5192     BuildMI(BB, dl, TII->get(is64bit ? PPC::STDCX : PPC::STWCX))
   5193       .addReg(dest).addReg(ptrA).addReg(ptrB);
   5194     BB->addSuccessor(exitMBB);
   5195 
   5196     //  exitMBB:
   5197     //   ...
   5198     BB = exitMBB;
   5199   } else if (MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
   5200              MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
   5201     // We must use 64-bit registers for addresses when targeting 64-bit,
   5202     // since we're actually doing arithmetic on them.  Other registers
   5203     // can be 32-bit.
   5204     bool is64bit = PPCSubTarget.isPPC64();
   5205     bool is8bit = MI->getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
   5206 
   5207     unsigned dest   = MI->getOperand(0).getReg();
   5208     unsigned ptrA   = MI->getOperand(1).getReg();
   5209     unsigned ptrB   = MI->getOperand(2).getReg();
   5210     unsigned oldval = MI->getOperand(3).getReg();
   5211     unsigned newval = MI->getOperand(4).getReg();
   5212     DebugLoc dl     = MI->getDebugLoc();
   5213 
   5214     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
   5215     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
   5216     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
   5217     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
   5218     F->insert(It, loop1MBB);
   5219     F->insert(It, loop2MBB);
   5220     F->insert(It, midMBB);
   5221     F->insert(It, exitMBB);
   5222     exitMBB->splice(exitMBB->begin(), BB,
   5223                     llvm::next(MachineBasicBlock::iterator(MI)),
   5224                     BB->end());
   5225     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
   5226 
   5227     MachineRegisterInfo &RegInfo = F->getRegInfo();
   5228     const TargetRegisterClass *RC =
   5229       is64bit ? (const TargetRegisterClass *) &PPC::G8RCRegClass :
   5230                 (const TargetRegisterClass *) &PPC::GPRCRegClass;
   5231     unsigned PtrReg = RegInfo.createVirtualRegister(RC);
   5232     unsigned Shift1Reg = RegInfo.createVirtualRegister(RC);
   5233     unsigned ShiftReg = RegInfo.createVirtualRegister(RC);
   5234     unsigned NewVal2Reg = RegInfo.createVirtualRegister(RC);
   5235     unsigned NewVal3Reg = RegInfo.createVirtualRegister(RC);
   5236     unsigned OldVal2Reg = RegInfo.createVirtualRegister(RC);
   5237     unsigned OldVal3Reg = RegInfo.createVirtualRegister(RC);
   5238     unsigned MaskReg = RegInfo.createVirtualRegister(RC);
   5239     unsigned Mask2Reg = RegInfo.createVirtualRegister(RC);
   5240     unsigned Mask3Reg = RegInfo.createVirtualRegister(RC);
   5241     unsigned Tmp2Reg = RegInfo.createVirtualRegister(RC);
   5242     unsigned Tmp4Reg = RegInfo.createVirtualRegister(RC);
   5243     unsigned TmpDestReg = RegInfo.createVirtualRegister(RC);
   5244     unsigned Ptr1Reg;
   5245     unsigned TmpReg = RegInfo.createVirtualRegister(RC);
   5246     unsigned ZeroReg = is64bit ? PPC::X0 : PPC::R0;
   5247     //  thisMBB:
   5248     //   ...
   5249     //   fallthrough --> loopMBB
   5250     BB->addSuccessor(loop1MBB);
   5251 
   5252     // The 4-byte load must be aligned, while a char or short may be
   5253     // anywhere in the word.  Hence all this nasty bookkeeping code.
   5254     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
   5255     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
   5256     //   xori shift, shift1, 24 [16]
   5257     //   rlwinm ptr, ptr1, 0, 0, 29
   5258     //   slw newval2, newval, shift
   5259     //   slw oldval2, oldval, shift
   5260     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
   5261     //   slw mask, mask2, shift
   5262     //   and newval3, newval2, mask
   5263     //   and oldval3, oldval2, mask
   5264     // loop1MBB:
   5265     //   lwarx tmpDest, ptr
   5266     //   and tmp, tmpDest, mask
   5267     //   cmpw tmp, oldval3
   5268     //   bne- midMBB
   5269     // loop2MBB:
   5270     //   andc tmp2, tmpDest, mask
   5271     //   or tmp4, tmp2, newval3
   5272     //   stwcx. tmp4, ptr
   5273     //   bne- loop1MBB
   5274     //   b exitBB
   5275     // midMBB:
   5276     //   stwcx. tmpDest, ptr
   5277     // exitBB:
   5278     //   srw dest, tmpDest, shift
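            // As in EmitPartwordAtomicBinary, compute a word-aligned address
            // plus a lane shift, then compare and exchange only the masked lane
            // inside the aligned word.  midMBB stores the just-loaded word back
            // with stwcx. purely to clear the reservation on the failure path.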
   5279     if (ptrA != ZeroReg) {
   5280       Ptr1Reg = RegInfo.createVirtualRegister(RC);
   5281       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
   5282         .addReg(ptrA).addReg(ptrB);
   5283     } else {
   5284       Ptr1Reg = ptrB;
   5285     }
   5286     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
   5287         .addImm(3).addImm(27).addImm(is8bit ? 28 : 27);
   5288     BuildMI(BB, dl, TII->get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
   5289         .addReg(Shift1Reg).addImm(is8bit ? 24 : 16);
   5290     if (is64bit)
   5291       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
   5292         .addReg(Ptr1Reg).addImm(0).addImm(61);
   5293     else
   5294       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
   5295         .addReg(Ptr1Reg).addImm(0).addImm(0).addImm(29);
   5296     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
   5297         .addReg(newval).addReg(ShiftReg);
   5298     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
   5299         .addReg(oldval).addReg(ShiftReg);
   5300     if (is8bit)
   5301       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
   5302     else {
   5303       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
   5304       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
   5305         .addReg(Mask3Reg).addImm(65535);
   5306     }
   5307     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
   5308         .addReg(Mask2Reg).addReg(ShiftReg);
   5309     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
   5310         .addReg(NewVal2Reg).addReg(MaskReg);
   5311     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
   5312         .addReg(OldVal2Reg).addReg(MaskReg);
   5313 
   5314     BB = loop1MBB;
   5315     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
   5316         .addReg(ZeroReg).addReg(PtrReg);
   5317     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
   5318         .addReg(TmpDestReg).addReg(MaskReg);
   5319     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
   5320         .addReg(TmpReg).addReg(OldVal3Reg);
   5321     BuildMI(BB, dl, TII->get(PPC::BCC))
   5322         .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(midMBB);
   5323     BB->addSuccessor(loop2MBB);
   5324     BB->addSuccessor(midMBB);
   5325 
   5326     BB = loop2MBB;
   5327     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
   5328         .addReg(TmpDestReg).addReg(MaskReg);
   5329     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
   5330         .addReg(Tmp2Reg).addReg(NewVal3Reg);
   5331     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(Tmp4Reg)
   5332         .addReg(ZeroReg).addReg(PtrReg);
   5333     BuildMI(BB, dl, TII->get(PPC::BCC))
   5334       .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loop1MBB);
   5335     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
   5336     BB->addSuccessor(loop1MBB);
   5337     BB->addSuccessor(exitMBB);
   5338 
   5339     BB = midMBB;
   5340     BuildMI(BB, dl, TII->get(PPC::STWCX)).addReg(TmpDestReg)
   5341       .addReg(ZeroReg).addReg(PtrReg);
   5342     BB->addSuccessor(exitMBB);
   5343 
   5344     //  exitMBB:
   5345     //   ...
   5346     BB = exitMBB;
   5347     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest).addReg(TmpReg)
   5348       .addReg(ShiftReg);
   5349   } else {
   5350     llvm_unreachable("Unexpected instr type to insert");
   5351   }
   5352 
   5353   MI->eraseFromParent();   // The pseudo instruction is gone now.
   5354   return BB;
   5355 }
   5356 
   5357 //===----------------------------------------------------------------------===//
   5358 // Target Optimization Hooks
   5359 //===----------------------------------------------------------------------===//
   5360 
   5361 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
   5362                                              DAGCombinerInfo &DCI) const {
   5363   const TargetMachine &TM = getTargetMachine();
   5364   SelectionDAG &DAG = DCI.DAG;
   5365   DebugLoc dl = N->getDebugLoc();
   5366   switch (N->getOpcode()) {
   5367   default: break;
   5368   case PPCISD::SHL:
   5369     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
   5370       if (C->isNullValue())   // 0 << V -> 0.
   5371         return N->getOperand(0);
   5372     }
   5373     break;
   5374   case PPCISD::SRL:
   5375     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
   5376       if (C->isNullValue())   // 0 >>u V -> 0.
   5377         return N->getOperand(0);
   5378     }
   5379     break;
   5380   case PPCISD::SRA:
   5381     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
   5382       if (C->isNullValue() ||   //  0 >>s V -> 0.
   5383           C->isAllOnesValue())    // -1 >>s V -> -1.
   5384         return N->getOperand(0);
   5385     }
   5386     break;
   5387 
   5388   case ISD::SINT_TO_FP:
   5389     if (TM.getSubtarget<PPCSubtarget>().has64BitSupport()) {
   5390       if (N->getOperand(0).getOpcode() == ISD::FP_TO_SINT) {
   5391         // Turn (sint_to_fp (fp_to_sint X)) -> fctidz/fcfid without load/stores.
   5392         // We allow the src/dst to be either f32/f64, but the intermediate
   5393         // type must be i64.
   5394         if (N->getOperand(0).getValueType() == MVT::i64 &&
   5395             N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) {
   5396           SDValue Val = N->getOperand(0).getOperand(0);
   5397           if (Val.getValueType() == MVT::f32) {
   5398             Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
   5399             DCI.AddToWorklist(Val.getNode());
   5400           }
   5401 
   5402           Val = DAG.getNode(PPCISD::FCTIDZ, dl, MVT::f64, Val);
   5403           DCI.AddToWorklist(Val.getNode());
   5404           Val = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Val);
   5405           DCI.AddToWorklist(Val.getNode());
   5406           if (N->getValueType(0) == MVT::f32) {
   5407             Val = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, Val,
   5408                               DAG.getIntPtrConstant(0));
   5409             DCI.AddToWorklist(Val.getNode());
   5410           }
   5411           return Val;
   5412         } else if (N->getOperand(0).getValueType() == MVT::i32) {
   5413           // If the intermediate type is i32, we can avoid the load/store here
   5414           // too.
   5415         }
   5416       }
   5417     }
   5418     break;
   5419   case ISD::STORE:
   5420     // Turn STORE (FP_TO_SINT F) -> STFIWX(FCTIWZ(F)).
   5421     if (TM.getSubtarget<PPCSubtarget>().hasSTFIWX() &&
   5422         !cast<StoreSDNode>(N)->isTruncatingStore() &&
   5423         N->getOperand(1).getOpcode() == ISD::FP_TO_SINT &&
   5424         N->getOperand(1).getValueType() == MVT::i32 &&
   5425         N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) {
   5426       SDValue Val = N->getOperand(1).getOperand(0);
   5427       if (Val.getValueType() == MVT::f32) {
   5428         Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
   5429         DCI.AddToWorklist(Val.getNode());
   5430       }
   5431       Val = DAG.getNode(PPCISD::FCTIWZ, dl, MVT::f64, Val);
   5432       DCI.AddToWorklist(Val.getNode());
   5433 
   5434       Val = DAG.getNode(PPCISD::STFIWX, dl, MVT::Other, N->getOperand(0), Val,
   5435                         N->getOperand(2), N->getOperand(3));
   5436       DCI.AddToWorklist(Val.getNode());
   5437       return Val;
   5438     }
   5439 
   5440     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
   5441     if (cast<StoreSDNode>(N)->isUnindexed() &&
   5442         N->getOperand(1).getOpcode() == ISD::BSWAP &&
   5443         N->getOperand(1).getNode()->hasOneUse() &&
   5444         (N->getOperand(1).getValueType() == MVT::i32 ||
   5445          N->getOperand(1).getValueType() == MVT::i16)) {
   5446       SDValue BSwapOp = N->getOperand(1).getOperand(0);
   5447       // Do an any-extend to 32-bits if this is a half-word input.
   5448       if (BSwapOp.getValueType() == MVT::i16)
   5449         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
   5450 
   5451       SDValue Ops[] = {
   5452         N->getOperand(0), BSwapOp, N->getOperand(2),
   5453         DAG.getValueType(N->getOperand(1).getValueType())
   5454       };
   5455       return
   5456         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
   5457                                 Ops, array_lengthof(Ops),
   5458                                 cast<StoreSDNode>(N)->getMemoryVT(),
   5459                                 cast<StoreSDNode>(N)->getMemOperand());
   5460     }
   5461     break;
   5462   case ISD::BSWAP:
   5463     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
   5464     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
   5465         N->getOperand(0).hasOneUse() &&
   5466         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) {
   5467       SDValue Load = N->getOperand(0);
   5468       LoadSDNode *LD = cast<LoadSDNode>(Load);
   5469       // Create the byte-swapping load.
   5470       SDValue Ops[] = {
   5471         LD->getChain(),    // Chain
   5472         LD->getBasePtr(),  // Ptr
   5473         DAG.getValueType(N->getValueType(0)) // VT
   5474       };
   5475       SDValue BSLoad =
   5476         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
   5477                                 DAG.getVTList(MVT::i32, MVT::Other), Ops, 3,
   5478                                 LD->getMemoryVT(), LD->getMemOperand());
   5479 
   5480       // If this is an i16 load, insert the truncate.
   5481       SDValue ResVal = BSLoad;
   5482       if (N->getValueType(0) == MVT::i16)
   5483         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
   5484 
   5485       // First, combine the bswap away.  This makes the value produced by the
   5486       // load dead.
   5487       DCI.CombineTo(N, ResVal);
   5488 
   5489       // Next, combine the load away; we give it a bogus result value but a
   5490       // real chain result.  The result value is dead because the bswap is dead.
   5491       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
   5492 
   5493       // Return N so it doesn't get rechecked!
   5494       return SDValue(N, 0);
   5495     }
   5496 
   5497     break;
   5498   case PPCISD::VCMP: {
   5499     // If a VCMPo node already exists with exactly the same operands as this
   5500     // node, use its result instead of this node (VCMPo computes both a CR6 and
   5501     // a normal output).
   5502     //
   5503     if (!N->getOperand(0).hasOneUse() &&
   5504         !N->getOperand(1).hasOneUse() &&
   5505         !N->getOperand(2).hasOneUse()) {
   5506 
   5507       // Scan all of the users of the LHS, looking for VCMPo's that match.
   5508       SDNode *VCMPoNode = 0;
   5509 
   5510       SDNode *LHSN = N->getOperand(0).getNode();
   5511       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
   5512            UI != E; ++UI)
   5513         if (UI->getOpcode() == PPCISD::VCMPo &&
   5514             UI->getOperand(1) == N->getOperand(1) &&
   5515             UI->getOperand(2) == N->getOperand(2) &&
   5516             UI->getOperand(0) == N->getOperand(0)) {
   5517           VCMPoNode = *UI;
   5518           break;
   5519         }
   5520 
   5521       // If there is no VCMPo node, or if its flag result has no uses, don't
   5522       // transform this.
   5523       if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
   5524         break;
   5525 
   5526       // Look at the (necessarily single) use of the flag value.  If it has a
   5527       // chain, this transformation is more complex.  Note that multiple things
   5528       // could use the value result, which we should ignore.
   5529       SDNode *FlagUser = 0;
   5530       for (SDNode::use_iterator UI = VCMPoNode->use_begin();
   5531            FlagUser == 0; ++UI) {
   5532         assert(UI != VCMPoNode->use_end() && "Didn't find user!");
   5533         SDNode *User = *UI;
   5534         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
   5535           if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
   5536             FlagUser = User;
   5537             break;
   5538           }
   5539         }
   5540       }
   5541 
   5542       // If the user is a MFCR instruction, we know this is safe.  Otherwise we
   5543       // give up for right now.
   5544       if (FlagUser->getOpcode() == PPCISD::MFCR)
   5545         return SDValue(VCMPoNode, 0);
   5546     }
   5547     break;
   5548   }
   5549   case ISD::BR_CC: {
   5550     // If this is a branch on an altivec predicate comparison, lower this so
   5551     // that we don't have to do a MFCR: instead, branch directly on CR6.  This
   5552     // lowering is done pre-legalize, because the legalizer lowers the predicate
   5553     // compare down to code that is difficult to reassemble.
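            // That is, rewrite  br_cc eq/ne (vcmp*_p ...), 0/1  into a
            // COND_BRANCH on the CR6 bit that the dot-form compare already sets.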
   5554     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
   5555     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
   5556     int CompareOpc;
   5557     bool isDot;
   5558 
   5559     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
   5560         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
   5561         getAltivecCompareInfo(LHS, CompareOpc, isDot)) {
   5562       assert(isDot && "Can't compare against a vector result!");
   5563 
   5564       // If this is a comparison against something other than 0/1, then we know
   5565       // that the condition is never/always true.
   5566       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
   5567       if (Val != 0 && Val != 1) {
   5568         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
   5569           return N->getOperand(0);
   5570         // Always !=, turn it into an unconditional branch.
   5571         return DAG.getNode(ISD::BR, dl, MVT::Other,
   5572                            N->getOperand(0), N->getOperand(4));
   5573       }
   5574 
   5575       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
   5576 
   5577       // Create the PPCISD altivec 'dot' comparison node.
   5578       std::vector<EVT> VTs;
   5579       SDValue Ops[] = {
   5580         LHS.getOperand(2),  // LHS of compare
   5581         LHS.getOperand(3),  // RHS of compare
   5582         DAG.getConstant(CompareOpc, MVT::i32)
   5583       };
   5584       VTs.push_back(LHS.getOperand(2).getValueType());
   5585       VTs.push_back(MVT::Glue);
   5586       SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops, 3);
   5587 
   5588       // Unpack the result based on how the target uses it.
   5589       PPC::Predicate CompOpc;
   5590       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
   5591       default:  // Can't happen, don't crash on invalid number though.
   5592       case 0:   // Branch on the value of the EQ bit of CR6.
   5593         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
   5594         break;
   5595       case 1:   // Branch on the inverted value of the EQ bit of CR6.
   5596         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
   5597         break;
   5598       case 2:   // Branch on the value of the LT bit of CR6.
   5599         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
   5600         break;
   5601       case 3:   // Branch on the inverted value of the LT bit of CR6.
   5602         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
   5603         break;
   5604       }
   5605 
   5606       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
   5607                          DAG.getConstant(CompOpc, MVT::i32),
   5608                          DAG.getRegister(PPC::CR6, MVT::i32),
   5609                          N->getOperand(4), CompNode.getValue(1));
   5610     }
   5611     break;
   5612   }
   5613   }
   5614 
   5615   return SDValue();
   5616 }
   5617 
   5618 //===----------------------------------------------------------------------===//
   5619 // Inline Assembly Support
   5620 //===----------------------------------------------------------------------===//
   5621 
   5622 void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
   5623                                                        APInt &KnownZero,
   5624                                                        APInt &KnownOne,
   5625                                                        const SelectionDAG &DAG,
   5626                                                        unsigned Depth) const {
   5627   KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
   5628   switch (Op.getOpcode()) {
   5629   default: break;
   5630   case PPCISD::LBRX: {
   5631     // lhbrx is known to have the top bits cleared out.
   5632     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
   5633       KnownZero = 0xFFFF0000;
   5634     break;
   5635   }
   5636   case ISD::INTRINSIC_WO_CHAIN: {
   5637     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
   5638     default: break;
   5639     case Intrinsic::ppc_altivec_vcmpbfp_p:
   5640     case Intrinsic::ppc_altivec_vcmpeqfp_p:
   5641     case Intrinsic::ppc_altivec_vcmpequb_p:
   5642     case Intrinsic::ppc_altivec_vcmpequh_p:
   5643     case Intrinsic::ppc_altivec_vcmpequw_p:
   5644     case Intrinsic::ppc_altivec_vcmpgefp_p:
   5645     case Intrinsic::ppc_altivec_vcmpgtfp_p:
   5646     case Intrinsic::ppc_altivec_vcmpgtsb_p:
   5647     case Intrinsic::ppc_altivec_vcmpgtsh_p:
   5648     case Intrinsic::ppc_altivec_vcmpgtsw_p:
   5649     case Intrinsic::ppc_altivec_vcmpgtub_p:
   5650     case Intrinsic::ppc_altivec_vcmpgtuh_p:
   5651     case Intrinsic::ppc_altivec_vcmpgtuw_p:
   5652       KnownZero = ~1U;  // All bits but the low one are known to be zero.
   5653       break;
   5654     }
   5655   }
   5656   }
   5657 }
   5658 
   5659 
   5660 /// getConstraintType - Given a constraint, return the type of
   5661 /// constraint it is for this target.
   5662 PPCTargetLowering::ConstraintType
   5663 PPCTargetLowering::getConstraintType(const std::string &Constraint) const {
   5664   if (Constraint.size() == 1) {
   5665     switch (Constraint[0]) {
   5666     default: break;
   5667     case 'b':
   5668     case 'r':
   5669     case 'f':
   5670     case 'v':
   5671     case 'y':
   5672       return C_RegisterClass;
   5673     }
   5674   }
   5675   return TargetLowering::getConstraintType(Constraint);
   5676 }
   5677 
   5678 /// Examine constraint type and operand type and determine a weight value.
   5679 /// This object must already have been set up with the operand type
   5680 /// and the current alternative constraint selected.
   5681 TargetLowering::ConstraintWeight
   5682 PPCTargetLowering::getSingleConstraintMatchWeight(
   5683     AsmOperandInfo &info, const char *constraint) const {
   5684   ConstraintWeight weight = CW_Invalid;
   5685   Value *CallOperandVal = info.CallOperandVal;
   5686   // If we don't have a value, we can't do a match,
   5687   // but allow it at the lowest weight.
   5688   if (CallOperandVal == NULL)
   5689     return CW_Default;
   5690   Type *type = CallOperandVal->getType();
   5691   // Look at the constraint type.
   5692   switch (*constraint) {
   5693   default:
   5694     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
   5695     break;
   5696   case 'b':
   5697     if (type->isIntegerTy())
   5698       weight = CW_Register;
   5699     break;
   5700   case 'f':
   5701     if (type->isFloatTy())
   5702       weight = CW_Register;
   5703     break;
   5704   case 'd':
   5705     if (type->isDoubleTy())
   5706       weight = CW_Register;
   5707     break;
   5708   case 'v':
   5709     if (type->isVectorTy())
   5710       weight = CW_Register;
   5711     break;
   5712   case 'y':
   5713     weight = CW_Register;
   5714     break;
   5715   }
   5716   return weight;
   5717 }
   5718 
   5719 std::pair<unsigned, const TargetRegisterClass*>
   5720 PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
   5721                                                 EVT VT) const {
   5722   if (Constraint.size() == 1) {
   5723     // GCC RS6000 Constraint Letters
   5724     switch (Constraint[0]) {
   5725     case 'b':   // R1-R31
   5726     case 'r':   // R0-R31
   5727       if (VT == MVT::i64 && PPCSubTarget.isPPC64())
   5728         return std::make_pair(0U, &PPC::G8RCRegClass);
   5729       return std::make_pair(0U, &PPC::GPRCRegClass);
   5730     case 'f':
   5731       if (VT == MVT::f32)
   5732         return std::make_pair(0U, &PPC::F4RCRegClass);
   5733       if (VT == MVT::f64)
   5734         return std::make_pair(0U, &PPC::F8RCRegClass);
   5735       break;
   5736     case 'v':
   5737       return std::make_pair(0U, &PPC::VRRCRegClass);
   5738     case 'y':   // crrc
   5739       return std::make_pair(0U, &PPC::CRRCRegClass);
   5740     }
   5741   }
   5742 
   5743   return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
   5744 }
   5745 
   5746 
   5747 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
   5748 /// vector.  If it is invalid, don't add anything to Ops.
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Only support length 1 constraints.
  if (Constraint.length() > 1) return;

  char Letter = Constraint[0];
  switch (Letter) {
  default: break;
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P': {
    ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
    if (!CST) return; // Must be an immediate to match.
    unsigned Value = CST->getZExtValue();
    switch (Letter) {
    default: llvm_unreachable("Unknown constraint letter!");
    case 'I':  // "I" is a signed 16-bit constant.
      if ((short)Value == (int)Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
    case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
      if ((short)Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
      if ((Value >> 16) == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'M':  // "M" is a constant that is greater than 31.
      if (Value > 31)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'N':  // "N" is a positive constant that is an exact power of two.
      if ((int)Value > 0 && isPowerOf2_32(Value))
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'O':  // "O" is the constant zero.
      if (Value == 0)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
      if ((short)-Value == (int)-Value)
        Result = DAG.getTargetConstant(Value, Op.getValueType());
      break;
    }
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // Handle standard constraint letters.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
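// Illustrative examples (not from the original source): reg+16 (Scale == 0,
// BaseOffs == 16) and reg+reg (Scale == 1, BaseOffs == 0) are accepted
// below, while reg+reg+4 and any Scale > 2 are rejected.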
bool PPCTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              Type *Ty) const {
  // FIXME: PPC does not allow r+i addressing modes for vectors!

  // PPC allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // PPC only supports r+r (no scaled index register).
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    // No other scales are supported.
    return false;
  }

  return true;
}

/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
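/// For example (illustrative only), an offset of 32000 is accepted by the
/// range check below, while 70000 is not.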
bool PPCTargetLowering::isLegalAddressImmediate(int64_t V, Type *Ty) const {
  // PPC allows a sign-extended 16-bit immediate field.
  return (V > -(1 << 16) && V < (1 << 16)-1);
}

bool PPCTargetLowering::isLegalAddressImmediate(GlobalValue* GV) const {
  return false;
}

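// Lowers @llvm.returnaddress (e.g. produced for __builtin_return_address(N)).
// Depth 0 reads the current function's saved LR slot; for Depth > 0 the
// parent frames are walked and the LR is reloaded from the ABI-defined
// return-save offset.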
SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  DebugLoc dl = Op.getDebugLoc();
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // Make sure the function does not optimize away the store of the RA to
  // the stack.
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setLRStoreRequired();
  bool isPPC64 = PPCSubTarget.isPPC64();
  bool isDarwinABI = PPCSubTarget.isDarwinABI();

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset =
      DAG.getConstant(PPCFrameLowering::getReturnSaveOffset(isPPC64, isDarwinABI),
                      isPPC64 ? MVT::i64 : MVT::i32);
    return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                   FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address off the stack.
  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
  return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}

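// Lowers @llvm.frameaddress (e.g. produced for __builtin_frame_address(N))
// by starting from the stack or frame pointer (R1/X1, or R31/X31 when a
// frame pointer is required) and following the saved back chain Depth times.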
SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
  bool isPPC64 = PtrVT == MVT::i64;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setFrameAddressIsTaken(true);
  bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) ||
               MFI->hasVarSizedObjects()) &&
              MFI->getStackSize() &&
              !MF.getFunction()->hasFnAttr(Attribute::Naked);
  unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) :
                                (is31 ? PPC::R31 : PPC::R1);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
                                         PtrVT);
  while (Depth--)
    FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
                            FrameAddr, MachinePointerInfo(), false, false,
                            false, 0);
  return FrameAddr;
}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The PowerPC target isn't yet aware of offsets.
  return false;
}

/// getOptimalMemOpType - Returns the target-specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove lowering.
/// If DstAlign is zero, the destination's alignment can satisfy any
/// constraint. Similarly, if SrcAlign is zero, there is no need to check it
/// against an alignment requirement, probably because the source does not
/// need to be loaded. If 'IsZeroVal' is true, it is safe to return a
/// non-scalar-integer type, e.g. an empty string source, a constant, or a
/// value loaded from memory. 'MemcpyStrSrc' indicates whether the memcpy
/// source is constant so it does not need to be loaded.
/// It returns MVT::Other if the type should be determined using generic
/// target-independent logic.
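/// As an illustrative example (not from the original source), a 16-byte
/// memcpy on a ppc64 subtarget would be expanded by the generic lowering
/// as two i64 load/store pairs rather than four i32 pairs.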
EVT PPCTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsZeroVal,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  return PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32;
}

/// isFMAFasterThanMulAndAdd - Return true if an FMA operation is faster than
/// a pair of mul and add instructions. fmuladd intrinsics will be expanded to
/// FMAs when this method returns true (and FMAs are legal); otherwise fmuladd
/// is expanded to mul + add.
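/// On PPC this holds because fmadd/fmadds (and vmaddfp for v4f32 under
/// Altivec) compute a*b+c in a single instruction, which is why the types
/// below answer true.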
bool PPCTargetLowering::isFMAFasterThanMulAndAdd(EVT VT) const {
  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
  case MVT::v4f32:
    return true;
  default:
    break;
  }

  return false;
}

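// Prefer ILP scheduling so independent operations are interleaved; the
// -disable-ppc-ilp-pref flag (declared near the top of this file) falls
// back to the generic preference.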
Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
  if (DisableILPPref)
    return TargetLowering::getSchedulingPreference(N);

  return Sched::ILP;
}
