//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(0), cl::Hidden,
   cl::desc("Set maximum size of jump tables; zero for no limit."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

static bool darwinHasSinCos(const Triple &TT) {
  assert(TT.isOSDarwin() && "should be called with darwin triple");
  // Don't bother with 32-bit x86.
  if (TT.getArch() == Triple::x86)
    return false;
  // macOS < 10.9 has no sincos_stret.
  if (TT.isMacOSX())
    return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  // iOS < 7.0 has no sincos_stret.
  if (TT.isiOS())
    return !TT.isOSVersionLT(7, 0);
  // Any other Darwin OS, such as watchOS or tvOS, is new enough.
  return true;
}

// Although this default value is arbitrary, it is not random. It is assumed
// that a condition that evaluates the same way more often than this percentage
// is best represented as control flow. Therefore, the default value N should
// be set such that the win from N% correct executions is greater than the loss
// from (100 - N)% mispredicted executions for the majority of intended targets.
static cl::opt<int> MinPercentageForPredictableBranch(
    "min-predictable-branch", cl::init(99),
    cl::desc("Minimum percentage (0-100) that a condition must be either true "
             "or false to assume that the condition is predictable"),
    cl::Hidden);

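/// Install the default libcall names and calling conventions, then apply the
/// per-OS and per-architecture overrides below.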
void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name) \
  setLibcallName(RTLIB::code, name);
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
  // Initialize calling conventions to their default.
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
    // of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

    // Some Darwin targets have an optimized __bzero/bzero function.
    switch (TT.getArch()) {
    case Triple::x86:
    case Triple::x86_64:
      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
        setLibcallName(RTLIB::BZERO, "__bzero");
      break;
    case Triple::aarch64:
      setLibcallName(RTLIB::BZERO, "bzero");
      break;
    default:
      break;
    }

    if (darwinHasSinCos(TT)) {
      setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
      if (TT.isWatchABI()) {
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
                              CallingConv::ARM_AAPCS_VFP);
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
                              CallingConv::ARM_AAPCS_VFP);
      }
    }
  } else {
    setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    setLibcallName(RTLIB::SINCOS_F80, "sincosl");
    setLibcallName(RTLIB::SINCOS_F128, "sincosl");
    setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  }

  if (TT.isOSOpenBSD()) {
    setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  }
}

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

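/// Map an atomic ISD opcode and value type to the matching __sync_* libcall;
/// the OP_TO_LIBCALL entries below pick the size-suffixed variant (_1 through
/// _16) from the integer width of VT.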
RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                              \
  case Name:                                                                   \
    switch (VT.SimpleTy) {                                                     \
    default:                                                                   \
      return UNKNOWN_LIBCALL;                                                  \
    case MVT::i8:                                                              \
      return Enum##_1;                                                         \
    case MVT::i16:                                                             \
      return Enum##_2;                                                         \
    case MVT::i32:                                                             \
      return Enum##_4;                                                         \
    case MVT::i64:                                                             \
      return Enum##_8;                                                         \
    case MVT::i128:                                                            \
      return Enum##_16;                                                        \
    }

  switch (Opc) {
    OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
    OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}

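// The element-wise unordered-atomic memory intrinsics are lowered to libcalls
// specialized on the element size. Only power-of-two sizes from 1 to 16 bytes
// have a libcall; any other size maps to UNKNOWN_LIBCALL.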
RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
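  // Soft-float comparison libcalls return an integer that the caller must
  // compare against zero with the condition code recorded here. For example,
  // the "oeq" calls return zero exactly when the operands compare equal, so
  // their result is interpreted with ISD::SETEQ.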
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
  CCs[RTLIB::O_F128] = ISD::SETEQ;
  CCs[RTLIB::O_PPCF128] = ISD::SETEQ;
}

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  HasFloatingPointExceptions = true;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  MinFunctionAlignment = 0;
  PrefFunctionAlignment = 0;
  PrefLoopAlignment = 0;
  GatherAllAliasesMaxDepth = 18;
  MinStackArgumentAlignment = 1;
  // TODO: the default will be switched to 0 in the next commit, along
  // with the Target-specific changes necessary.
  MaxAtomicSizeInBitsSupported = 1024;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames),
            nullptr);

  InitLibcalls(TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node that just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FMINNAN, VT, Expand);
    setOperationAction(ISD::FMAXNAN, VT, Expand);
    setOperationAction(ISD::FMAD, VT, Expand);
    setOperationAction(ISD::SMIN, VT, Expand);
    setOperationAction(ISD::SMAX, VT, Expand);
    setOperationAction(ISD::UMIN, VT, Expand);
    setOperationAction(ISD::UMAX, VT, Expand);
    setOperationAction(ISD::ABS, VT, Expand);

    // Overflow operations default to expand.
    setOperationAction(ISD::SADDO, VT, Expand);
    setOperationAction(ISD::SSUBO, VT, Expand);
    setOperationAction(ISD::UADDO, VT, Expand);
    setOperationAction(ISD::USUBO, VT, Expand);
    setOperationAction(ISD::SMULO, VT, Expand);
    setOperationAction(ISD::UMULO, VT, Expand);

    // ADDCARRY operations default to expand.
    setOperationAction(ISD::ADDCARRY, VT, Expand);
    setOperationAction(ISD::SUBCARRY, VT, Expand);
    setOperationAction(ISD::SETCCCARRY, VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);

    setOperationAction(ISD::BITREVERSE, VT, Expand);

    // These library functions default to expand.
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector()) {
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
    }

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand.  Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);

  // These library functions default to expand.
  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
    setOperationAction(ISD::FLOG ,      VT, Expand);
    setOperationAction(ISD::FLOG2,      VT, Expand);
    setOperationAction(ISD::FLOG10,     VT, Expand);
    setOperationAction(ISD::FEXP ,      VT, Expand);
    setOperationAction(ISD::FEXP2,      VT, Expand);
    setOperationAction(ISD::FFLOOR,     VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FCEIL,      VT, Expand);
    setOperationAction(ISD::FRINT,      VT, Expand);
    setOperationAction(ISD::FTRUNC,     VT, Expand);
    setOperationAction(ISD::FROUND,     VT, Expand);
  }

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
}

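// Unless a target overrides this hook, the amount by which a scalar value is
// shifted is modeled as a pointer-sized integer.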
MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                                         bool LegalTypes) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  return LegalTypes ? getScalarShiftAmountTy(DL, LHSTy)
                    : getPointerTy(DL);
}

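// By default only integer division and remainder are considered trapping
// (e.g. on division by zero); every other legal operation is assumed safe.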
bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA,
                          EVT::getVectorVT(Context, SVT.getVectorElementType(),
                                           SVT.getVectorNumElements() / 2));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle extended scalar types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  unsigned NumElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts == 1)
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = (unsigned)NextPowerOf2(NumElts);
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If the type is to be expanded, split the vector.
    //  <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger)
      return LegalizeKind(TypeSplitVector,
                          EVT::getVectorVT(Context, EltVT, NumElts / 2));

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fall back to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64-bit elements on 32-bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = (unsigned)NextPowerOf2(NumElts);

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type.  Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen vectors with a non-power-of-two number of elements to the next
  // power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
  return LegalizeKind(TypeSplitVector, NVT);
}

static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT))    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// isLegalRC - Return true if at least one of the value types that can be
/// represented by the specified register class is legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  // MI changes inside this loop as we grow operands.
  for (unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
    MachineOperand &MO = MI->getOperand(OperIdx);
    if (!MO.isFI())
      continue;

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();
    MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

    // Copy operands before the frame-index.
    for (unsigned i = 0; i < OperIdx; ++i)
      MIB.add(MI->getOperand(i));
    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering.  This codepath is not
      // used for patchpoints/stackmaps at all, for these spilling is done via
      // foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MI->getOperand(OperIdx));
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MI->getOperand(OperIdx));
      MIB.addImm(0);
    }
    // Copy the operands after the frame index.
    for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
      MIB.add(MI->getOperand(i));

    // Inherit previous memory operands.
    MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    auto Flags = MachineMemOperand::MOLoad;
    if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
      Flags |= MachineMemOperand::MOStore;
      Flags |= MachineMemOperand::MOVolatile;
    }
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags,
        MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
    MIB->addMemOperand(MF, MMO);

    // Replace the instruction and update the operand index.
    MBB->insert(MachineBasicBlock::iterator(MI), MIB);
    OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
    MI->eraseFromParent();
    MI = MIB;
  }
  return MBB;
}

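// Both XRay event emitters below rebuild the patchable pseudo in place: they
// copy every operand onto a fresh instruction with the same descriptor,
// insert it, and erase the original.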
MachineBasicBlock *
TargetLoweringBase::emitXRayCustomEvent(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const {
  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_EVENT_CALL &&
         "Called emitXRayCustomEvent on the wrong MI!");
  auto &MF = *MI.getMF();
  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
    MIB.add(MI.getOperand(OpIdx));

  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *
TargetLoweringBase::emitXRayTypedEvent(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_TYPED_EVENT_CALL &&
         "Called emitXRayTypedEvent on the wrong MI!");
  auto &MF = *MI.getMF();
  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
    MIB.add(MI.getOperand(OpIdx));

  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI.eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT, which
// would need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change, when all that is really needed
// is for TargetLowering to hold a TargetRegisterInfo member that it can use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
                "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueType's smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
        (const MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128 support,
  // expand it to i128 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32 support,
  // expand it to i32 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
    RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
    TransformToType[MVT::f16] = MVT::f32;
    ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    unsigned NElts = VT.getVectorNumElements();
    bool IsLegalWiderType = false;
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger:
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1; nVT <= MVT::LAST_INTEGER_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
            SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;

    case TypeWidenVector:
      // Try to widen the vector.
      for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        if (SVT.getVectorElementType() == EltVT
            && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2.  The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          // Set type action according to the number of elements.
          ValueTypeActions.setTypeAction(VT, NElts == 1 ? TypeScalarizeVector
                                                        : TypeSplitVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class for
  // a group of value types. For example, on i386 the representative class for
  // i8, i16, and i32 would be GR32; on x86_64 it would be GR64.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

   1254                                            EVT VT) const {
   1255   assert(!VT.isVector() && "No default SetCC type for vectors!");
   1256   return getPointerTy(DL).SimpleTy;
   1257 }
   1258 
   1259 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
   1260   return MVT::i32; // return the default value
   1261 }
   1262 
   1263 /// getVectorTypeBreakdown - Vector types are broken down into some number of
   1264 /// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
   1265 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
   1266 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
   1267 ///
   1268 /// This method returns the number of registers needed, and the VT for each
   1269 /// register.  It also returns the VT and quantity of the intermediate values
   1270 /// before they are promoted/expanded.
   1271 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
   1272                                                 EVT &IntermediateVT,
   1273                                                 unsigned &NumIntermediates,
   1274                                                 MVT &RegisterVT) const {
   1275   unsigned NumElts = VT.getVectorNumElements();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements but wider
  // elements, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !isTypeLegal(
                                   EVT::getVectorVT(Context, EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;
  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  if (EVT(DestVT).bitsLT(NewVT))   // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promoted or legal types use the same number of registers as
  // the vector divided down to the appropriate level.
  return NumVectorRegs;
}
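
// Illustrative usage (an editorial sketch, not part of the original source;
// the legality assumptions are hypothetical): on a target where v4f32 is the
// widest legal vector type, the breakdown of MVT::v8f32 described above would
// be observed as:
//
//   EVT IntermediateVT;
//   MVT RegisterVT;
//   unsigned NumIntermediates;
//   unsigned NumRegs = TLI.getVectorTypeBreakdown(
//       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
//   // NumRegs == 2, NumIntermediates == 2,
//   // IntermediateVT == v4f32, RegisterVT == v4f32.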

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function.  This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
                         AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: The C calling convention requires the return type to be promoted
    // to at least 32 bits. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);

    // The 'inreg' attribute on the function refers to the return value.
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate the extension type, if any.
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
  }
}
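
// Sketch of the effect (editorial example; the 32-bit target is an
// assumption): for a function declared as returning 'zeroext i8' under the C
// calling convention on a target where i32 is the narrowest register type,
// the loop above promotes VT from i8 to i32 and pushes a single
// ISD::OutputArg with the ZExt flag set, so callers know the value arrives
// zero-extended.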

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.  This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                   const DataLayout &DL) const {
  return DL.getABITypeAlignment(Ty);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace,
                                            unsigned Alignment,
                                            bool *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by hardware implementation.
  Type *Ty = VT.getTypeForEVT(Context);
  if (Alignment >= DL.getABITypeAlignment(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
}
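
// Example of the flow above (editorial; the hook's answer is target-defined):
// querying an i32 access with Alignment == 2 on a target whose DataLayout
// gives i32 a 4-byte ABI alignment skips the fast path and returns whatever
// allowsMisalignedMemoryAccesses(MVT::i32, AddrSpace, 2, Fast) decides.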

BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
  return BranchProbability(MinPercentageForPredictableBranch, 100);
}

//===----------------------------------------------------------------------===//
//  TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret:            return 0;
  case Br:             return 0;
  case Switch:         return 0;
  case IndirectBr:     return 0;
  case Invoke:         return 0;
  case Resume:         return 0;
  case Unreachable:    return 0;
  case CleanupRet:     return 0;
  case CatchRet:       return 0;
  case CatchPad:       return 0;
  case CatchSwitch:    return 0;
  case CleanupPad:     return 0;
  case Add:            return ISD::ADD;
  case FAdd:           return ISD::FADD;
  case Sub:            return ISD::SUB;
  case FSub:           return ISD::FSUB;
  case Mul:            return ISD::MUL;
  case FMul:           return ISD::FMUL;
  case UDiv:           return ISD::UDIV;
  case SDiv:           return ISD::SDIV;
  case FDiv:           return ISD::FDIV;
  case URem:           return ISD::UREM;
  case SRem:           return ISD::SREM;
  case FRem:           return ISD::FREM;
  case Shl:            return ISD::SHL;
  case LShr:           return ISD::SRL;
  case AShr:           return ISD::SRA;
  case And:            return ISD::AND;
  case Or:             return ISD::OR;
  case Xor:            return ISD::XOR;
  case Alloca:         return 0;
  case Load:           return ISD::LOAD;
  case Store:          return ISD::STORE;
  case GetElementPtr:  return 0;
  case Fence:          return 0;
  case AtomicCmpXchg:  return 0;
  case AtomicRMW:      return 0;
  case Trunc:          return ISD::TRUNCATE;
  case ZExt:           return ISD::ZERO_EXTEND;
  case SExt:           return ISD::SIGN_EXTEND;
  case FPToUI:         return ISD::FP_TO_UINT;
  case FPToSI:         return ISD::FP_TO_SINT;
  case UIToFP:         return ISD::UINT_TO_FP;
  case SIToFP:         return ISD::SINT_TO_FP;
  case FPTrunc:        return ISD::FP_ROUND;
  case FPExt:          return ISD::FP_EXTEND;
  case PtrToInt:       return ISD::BITCAST;
  case IntToPtr:       return ISD::BITCAST;
  case BitCast:        return ISD::BITCAST;
  case AddrSpaceCast:  return ISD::ADDRSPACECAST;
  case ICmp:           return ISD::SETCC;
  case FCmp:           return ISD::SETCC;
  case PHI:            return 0;
  case Call:           return 0;
  case Select:         return ISD::SELECT;
  case UserOp1:        return 0;
  case UserOp2:        return 0;
  case VAArg:          return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
  case ExtractValue:   return ISD::MERGE_VALUES;
  case InsertValue:    return ISD::MERGE_VALUES;
  case LandingPad:     return 0;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}
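
// Typical usage (editorial sketch): cost models map an IR opcode to its
// SelectionDAG counterpart before querying operation legality, e.g.:
//
//   int ISDOpcode = TLI.InstructionOpcodeToISD(Instruction::Add);
//   // ISDOpcode == ISD::ADD; opcodes with no DAG equivalent map to 0.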

std::pair<int, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  int Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Do not loop if the conversion maps the type to itself (e.g. f128).
    if (MTy == LK.second)
      return std::make_pair(Cost, MTy.getSimpleVT());

    // Keep legalizing the type.
    MTy = LK.second;
  }
}
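
// Worked example (editorial; vector legality is a per-target assumption): on
// a target where v4i32 is legal but v8i32 is not, v8i32 is split once into
// two v4i32 halves, so:
//
//   std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, V8I32Ty);
//   // LT.first == 2 (one split doubles the cost), LT.second == MVT::v4i32.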

Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
                                                              bool UseTLS) const {
  // compiler-rt provides a variable with a magic name.  Targets that do not
  // link with compiler-rt may also provide such a variable.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
  auto UnsafeStackPtr =
      dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));

  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());

  if (!UnsafeStackPtr) {
    auto TLSModel = UseTLS ?
        GlobalValue::InitialExecTLSModel :
        GlobalValue::NotThreadLocal;
    // The global variable is not defined yet; define it ourselves.
    // We use the initial-exec TLS model because we do not support the
    // variable living anywhere other than in the main executable.
    UnsafeStackPtr = new GlobalVariable(
        *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
        UnsafeStackPtrVar, nullptr, TLSModel);
  } else {
    // The variable exists; check its type and attributes.
    if (UnsafeStackPtr->getValueType() != StackPtrTy)
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
    if (UseTLS != UnsafeStackPtr->isThreadLocal())
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
                         (UseTLS ? "" : "not ") + "be thread-local");
  }
  return UnsafeStackPtr;
}
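
// For reference, the global created above corresponds to IR along these lines
// (an approximate editorial rendering, with UseTLS == true):
//
//   @__safestack_unsafe_stack_ptr = external thread_local(initialexec)
//                                   global i8*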

Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return getDefaultSafeStackPointerLocation(IRB, true);

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
  Value *Fn = M->getOrInsertFunction("__safestack_pointer_address",
                                     StackPtrTy->getPointerTo(0));
  return IRB.CreateCall(Fn);
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS, Instruction *I) const {
  // The default implementation supports only a conservative RISC-style
  // r+r and r+i addressing mode.

  // Allow a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r.
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default: // Don't allow n * r.
    return false;
  }

  return true;
}
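
// Examples of the default policy above (an editorial sketch; the fields are
// those of TargetLoweringBase::AddrMode, and each line assumes a freshly
// default-constructed AM):
//
//   AddrMode AM;
//   AM.HasBaseReg = true; AM.BaseOffs = 16;               // r+i:   accepted
//   AM.HasBaseReg = true; AM.Scale = 1;                   // r+r:   accepted
//   AM.HasBaseReg = true; AM.Scale = 1; AM.BaseOffs = 8;  // r+r+i: rejected
//   AM.Scale = 4;                                         // 4*r:   rejected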

//===----------------------------------------------------------------------===//
//  Stack Protector
//===----------------------------------------------------------------------===//

// For OpenBSD, return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG handles SSP.
Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
    return M.getOrInsertGlobal("__guard_local", PtrTy);
  }
  return nullptr;
}

// Currently only the "standard" __stack_chk_guard is supported.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
  if (!M.getNamedValue("__stack_chk_guard"))
    new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
                       GlobalVariable::ExternalLinkage,
                       nullptr, "__stack_chk_guard");
}

// Currently only the "standard" __stack_chk_guard is supported.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
  return M.getNamedValue("__stack_chk_guard");
}

Value *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
  return nullptr;
}

unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
  return MinimumJumpTableEntries;
}

void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
  MinimumJumpTableEntries = Val;
}

unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
}

unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
  return MaximumJumpTableSize;
}

void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
  MaximumJumpTableSize = Val;
}

//===----------------------------------------------------------------------===//
//  Reciprocal Estimates
//===----------------------------------------------------------------------===//

/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
}

/// Construct a string for the given reciprocal operation of the given type.
/// This string should match the corresponding option to the front-end's
/// "-mrecip" flag, assuming those strings have been passed through in an
/// attribute string. For example, "vec-divf" for a division of a vXf32.
static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
  std::string Name = VT.isVector() ? "vec-" : "";

  Name += IsSqrt ? "sqrt" : "div";

  // TODO: Handle "half" or other float types?
  if (VT.getScalarType() == MVT::f64) {
    Name += "d";
  } else {
    assert(VT.getScalarType() == MVT::f32 &&
           "Unexpected FP type for reciprocal estimate");
    Name += "f";
  }

  return Name;
}
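
// Example outputs of the naming scheme above: scalar f32 division -> "divf",
// v2f64 division -> "vec-divd", scalar f32 square root -> "sqrtf".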

/// If the input string contains a customized refinement step count, return
/// true and set the character position of the step delimiter and its value
/// (a single numeric character). Return false if there is no customized
/// refinement step count.
static bool parseRefinementStep(StringRef In, size_t &Position,
                                uint8_t &Value) {
  const char RefStepToken = ':';
  Position = In.find(RefStepToken);
  if (Position == StringRef::npos)
    return false;

  StringRef RefStepString = In.substr(Position + 1);
  // Allow exactly one numeric character for the additional refinement
  // step parameter.
  if (RefStepString.size() == 1) {
    char RefStepChar = RefStepString[0];
    if (RefStepChar >= '0' && RefStepChar <= '9') {
      Value = RefStepChar - '0';
      return true;
    }
  }
  report_fatal_error("Invalid refinement step for -recip.");
}
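
// Example (per the format above): for In == "vec-divf:2", Position is set to
// 8 (the index of ':') and Value to 2, and the function returns true; for
// In == "vec-divf" it returns false without touching the outputs.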

/// For the input attribute string, return one of the ReciprocalEstimate enum
/// status values (enabled, disabled, or not specified) for this operation on
/// the specified data type.
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "none", or "default" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(Override, RefPos, RefSteps)) {
      // Split the string for further processing.
      Override = Override.substr(0, RefPos);
    }

    // All reciprocal types are enabled.
    if (Override == "all")
      return TargetLoweringBase::ReciprocalEstimate::Enabled;

    // All reciprocal types are disabled.
    if (Override == "none")
      return TargetLoweringBase::ReciprocalEstimate::Disabled;

    // Target defaults for enablement are used.
    if (Override == "default")
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();
  static const char DisabledPrefix = '!';

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(RecipType, RefPos, RefSteps))
      RecipType = RecipType.substr(0, RefPos);

    // Ignore the disablement token for string matching.
    bool IsDisabled = RecipType[0] == DisabledPrefix;
    if (IsDisabled)
      RecipType = RecipType.substr(1);

    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
                        : TargetLoweringBase::ReciprocalEstimate::Enabled;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}
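
// Example (editorial): with the attribute string "!sqrtf,vec-divf:2",
// getOpEnabled(true, MVT::f32, ...) returns Disabled because of the '!'
// prefix, while getOpEnabled(false, MVT::v4f32, ...) returns Enabled via the
// "vec-divf" match (the ":2" step count is stripped before matching).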

/// For the input attribute string, return the customized refinement step count
/// for this operation on the specified data type. If the step count does not
/// exist, return the ReciprocalEstimate enum value for unspecified.
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "default", or "none" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(Override, RefPos, RefSteps))
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;

    // Split the string for further processing.
    Override = Override.substr(0, RefPos);
    assert(Override != "none" &&
           "Disabled reciprocals, but specified refinement steps?");

    // If this is a general override, return the specified number of steps.
    if (Override == "all" || Override == "default")
      return RefSteps;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(RecipType, RefPos, RefSteps))
      continue;

    RecipType = RecipType.substr(0, RefPos);
    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return RefSteps;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}
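
// Example (editorial): with the attribute string "vec-divf:2,sqrtd:1",
// getOpRefinementSteps(false, MVT::v4f32, ...) returns 2 and
// getOpRefinementSteps(true, MVT::f64, ...) returns 1; entries without a
// ':' step suffix are skipped, and no match yields Unspecified.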

int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
                                                    MachineFunction &MF) const {
  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
                                                   MachineFunction &MF) const {
  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
                                               MachineFunction &MF) const {
  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getDivRefinementSteps(EVT VT,
                                              MachineFunction &MF) const {
  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}

void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
  MF.getRegInfo().freezeReservedRegs(MF);
}
   1858