//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "aarch64tti"

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  unsigned LZ = countLeadingZeros((uint64_t)Val);
  return (64 - LZ + 15) / 16;
}
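
// For example, Val = 0x12345678 has 35 leading zeros, so the cost is
// (64 - 35 + 15) / 16 == 2, which roughly corresponds to a MOVZ plus one
// MOVK for the two non-zero 16-bit chunks. A bitmask immediate such as 0xFF
// is encodable directly in a logical instruction and costs 0.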

/// \brief Calculate the cost of materializing the given constant.
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
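
// For example, an i128 constant is costed as two independent 64-bit chunks:
// if the low chunk is a bitmask immediate (cost 0) and the high chunk needs
// two 16-bit moves, the total is std::max(1, 0 + 2) == 2.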

int AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}
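
// For example, for `add i64 %x, 255` the immediate sits at ImmIdx == 1 and
// its materialization cost (1) does not exceed NumConstants * TCC_Basic, so
// TCC_Free is returned and constant hoisting leaves it in place. A GEP base
// address (Idx == 0) is always reported as 2 * TCC_Basic so that it does get
// hoisted.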

int AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
  return TTI::PSK_Software;
}
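
// For example, a 32- or 64-bit ctpop is reported as fast hardware because it
// lowers to a short NEON sequence (roughly an fmov into a vector register,
// cnt, and an add-across reduction, depending on the subtarget); all other
// power-of-two widths currently take the software path.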

int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry
  ConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
    { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },

    // The number of shll instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v8f32
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },

    // Complex: to v16f32
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

  if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
                                                 DstTy.getSimpleVT(),
                                                 SrcTy.getSimpleVT()))
    return Entry->Cost;

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
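
// For example, `trunc <4 x i32> ... to <4 x i16>` hits the first table entry
// and costs 1 (a single narrowing instruction such as xtn), while
// `sext <8 x i8> ... to <8 x i32>` costs 3 for its shll-based widening chain.
// Conversions not listed fall back to the base implementation.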

int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                       unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other inserts/extracts cost this much.
  return 3;
}
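
// For example, <4 x i64> legalizes to two <2 x i64> registers, so Width == 2:
// extracting element 2 normalizes to lane 0 of the second register (cost 0),
// while extracting element 1 or 3 costs 3 like any other lane move.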

int AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  if (ISD == ISD::SDIV &&
      Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On AArch64, scalar signed division by a power-of-two constant is
    // normally expanded to the sequence ADD + CMP + SELECT + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
                                      TargetTransformInfo::OP_None,
                                      TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return 1 * LT.first;
  }
}
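
// For example, `sdiv i32 %x, 8` with a uniform power-of-two divisor is costed
// as the sum of the four instructions in the expansion above (the Sub stands
// in for the CMP), rather than at the much higher cost of a real division.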

int AArch64TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}
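
// For example, a strided access such as a[3 * i] in vectorized code is
// "complex": its address arithmetic cannot fold into the load's addressing
// mode, so it is charged 10 to discourage vectorization unless enough other
// vector work hides the overhead. A simple consecutive access is charged 1.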

int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower vector selects well when they are wider than the register
  // width.
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;
    static const TypeConversionCostTblEntry
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
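
// For example, a select between <16 x i16> values under a <16 x i1> condition
// is fully scalarized into 16 lane-wise operations (cost 16), and the
// i64-element entries are further multiplied by AmortizationCost so the
// vectorizer only picks them when plenty of other work is vectorized too.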

int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment, unsigned AddressSpace) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isIntegerTy(64)) {
    // Unaligned stores are extremely inefficient. We don't split unaligned
    // v2i64 stores because of the negative impact that has been observed in
    // practice on inlined memcpy code.
    // We make v2i64 stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
      Src->getVectorNumElements() < 8) {
    // We scalarize the loads/stores because there is no v.4b register and we
    // have to promote the elements to v.4h.
    unsigned NumVecElts = Src->getVectorNumElements();
    unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
    // We generate 2 instructions per vector element.
    return NumVectorizableInstsToAmortize * NumVecElts * 2;
  }

  return LT.first;
}
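
// For example, an unaligned store of <2 x i64> (LT.first == 1) is costed at
// 1 * 2 * 6 == 12, and a store of <4 x i8> at (4 * 2) * 4 * 2 == 64 because
// every element is promoted and handled individually.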

int AArch64TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecTy->getVectorNumElements();
    Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
    unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
    if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
      return Factor;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}
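
// For example, an interleaved load of two <4 x i16> fields (VecTy ==
// <8 x i16>, Factor == 2) maps onto a single ld2 and is costed at
// Factor == 2; larger factors or sub-vectors that are not 64 or 128 bits
// wide fall back to the scalarized base cost.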

int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  int Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
        getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}
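
// For example, a <4 x i32> value (128 bits) that is live across a call is
// costed as one spill store plus one reload, since only the low 64 bits of
// the callee-saved vector registers are preserved across calls on AArch64;
// scalar and smaller vector types add nothing here.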

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  if (ST->isCortexA57())
    return 4;
  return 2;
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, UP);

  // An inner loop is more likely to be hot, and the runtime check can be
  // hoisted out by the LICM pass, so the overhead is lower; try a larger
  // threshold to unroll more loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;
}

Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Check that the expected struct type matches the stored operands.
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}
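
// For example, if an st2 of two <4 x i32> values is later read back as a
// { <4 x i32>, <4 x i32> } struct, the loop above rebuilds that struct with
// insertvalue instructions so a pass such as EarlyCSE can forward the stored
// values without touching memory.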

bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.IsSimple = true;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.IsSimple = true;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}
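
// For example, an aarch64.neon.ld2 intrinsic is described as a simple read of
// one memory location through its pointer operand (the last argument for the
// stN forms), and MatchingId pairs each ldN with the corresponding stN so
// matching load/store pairs can be recognized.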