//===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a TargetTransformInfo analysis pass specific to the
// SystemZ target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "SystemZTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "systemztti"

//===----------------------------------------------------------------------===//
//
// SystemZ cost model.
//
//===----------------------------------------------------------------------===//

int SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // There is no cost model implemented yet for operations on integers larger
  // than 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

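  // Illustrative examples (not exhaustive): 0x7fffffff costs 1 (lgfi),
  // 0xffffffff costs 1 (llilf), 0x100000000 costs 1 (llihf), and a value
  // like 0x100000001 needs two instructions (e.g. llihf + oilf), so costs 2.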
  if (Imm.getBitWidth() <= 64) {
    // Constants loaded via lgfi.
    if (isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llilf.
    if (isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Basic;
    // Constants loaded via llihf.
    if ((Imm.getZExtValue() & 0xffffffff) == 0)
      return TTI::TCC_Basic;

    return 2 * TTI::TCC_Basic;
  }

  return 4 * TTI::TCC_Basic;
}

int SystemZTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // There is no cost model implemented yet for operations on integers larger
  // than 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    if (Idx == 0 && Imm.getBitWidth() <= 64) {
      // Any 8-bit immediate store can be implemented via mvi.
      if (BitSize == 8)
        return TTI::TCC_Free;
      // 16-bit immediate values can be stored via mvhhi/mvhi/mvghi.
      if (isInt<16>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::ICmp:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Comparisons against signed 32-bit immediates implemented via cgfi.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
      // Comparisons against unsigned 32-bit immediates implemented via clgfi.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Add:
  case Instruction::Sub:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use algfi/slgfi to add/subtract 32-bit unsigned immediates.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Or their negation, by swapping addition vs. subtraction.
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Mul:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // We use msgfi to multiply by 32-bit signed immediates.
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Or:
  case Instruction::Xor:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Masks supported by oilf/xilf.
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      // Masks supported by oihf/xihf.
      if ((Imm.getZExtValue() & 0xffffffff) == 0)
        return TTI::TCC_Free;
    }
    break;
  case Instruction::And:
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      // Any 32-bit AND operation can be implemented via nilf.
      if (BitSize <= 32)
        return TTI::TCC_Free;
      // 64-bit masks supported by nilf.
      if (isUInt<32>(~Imm.getZExtValue()))
        return TTI::TCC_Free;
      // 64-bit masks supported by nilh.
      if ((Imm.getZExtValue() & 0xffffffff) == 0xffffffff)
        return TTI::TCC_Free;
      // Some 64-bit AND operations can be implemented via risbg.
      const SystemZInstrInfo *TII = ST->getInstrInfo();
      unsigned Start, End;
      if (TII->isRxSBGMask(Imm.getZExtValue(), BitSize, Start, End))
        return TTI::TCC_Free;
    }
    break;
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    // Always return TCC_Free for the shift value of a shift instruction.
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}

int SystemZTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;
  // There is no cost model implemented yet for operations on integers larger
  // than 64 bits.
  if (BitSize > 64)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    // These get expanded to include a normal addition/subtraction.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isUInt<32>(Imm.getZExtValue()))
        return TTI::TCC_Free;
      if (isUInt<32>(-Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    // These get expanded to include a normal multiplication.
    if (Idx == 1 && Imm.getBitWidth() <= 64) {
      if (isInt<32>(Imm.getSExtValue()))
        return TTI::TCC_Free;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return SystemZTTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
  if (ST->hasPopulationCount() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Find out if L contains a call and how many stores there are, weighting
  // each store by its memory-op cost.
  bool HasCall = false;
  unsigned NumStores = 0;
  for (auto &BB : L->blocks())
    for (auto &I : *BB) {
      if (isa<CallInst>(&I) || isa<InvokeInst>(&I)) {
        ImmutableCallSite CS(&I);
        if (const Function *F = CS.getCalledFunction()) {
          if (isLoweredToCall(F))
            HasCall = true;
          if (F->getIntrinsicID() == Intrinsic::memcpy ||
              F->getIntrinsicID() == Intrinsic::memset)
            NumStores++;
        } else { // indirect call.
          HasCall = true;
        }
      }
      if (isa<StoreInst>(&I)) {
        Type *MemAccessTy = I.getOperand(0)->getType();
        NumStores += getMemoryOpCost(Instruction::Store, MemAccessTy, 0, 0);
      }
    }

  // The z13 processor will run out of store tags if too many stores
  // are fed into it too quickly. Therefore make sure there are not
  // too many stores in the resulting unrolled loop.
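  // E.g. if the stores in the loop body add up to a cost of 3, Max becomes
  // 12 / 3 = 4, keeping the unrolled body at roughly 12 stores.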
  unsigned const Max = (NumStores ? (12 / NumStores) : UINT_MAX);

  if (HasCall) {
    // Only allow full unrolling if the loop has any calls.
    UP.FullUnrollMaxCount = Max;
    UP.MaxCount = 1;
    return;
  }

  UP.MaxCount = Max;
  if (UP.MaxCount <= 1)
    return;

  // Allow partial and runtime trip count unrolling.
  UP.Partial = UP.Runtime = true;

  UP.PartialThreshold = 75;
  UP.DefaultUnrollRuntimeCount = 4;

  // Allow expensive instructions in the pre-header of the loop.
  UP.AllowExpensiveTripCount = true;

  UP.Force = true;
}


bool SystemZTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                                   TargetTransformInfo::LSRCost &C2) {
  // SystemZ specific: check instruction count (first), and don't care about
  // ImmCost, since offsets are checked explicitly.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.SetupCost) <
    std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
             C2.NumIVMuls, C2.NumBaseAdds,
             C2.ScaleCost, C2.SetupCost);
}

unsigned SystemZTTIImpl::getNumberOfRegisters(bool Vector) {
  if (!Vector)
    // Discount the stack pointer.  Also leave out %r0, since it can't
    // be used in an address.
    return 14;
  if (ST->hasVector())
    return 32;
  return 0;
}

unsigned SystemZTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (!Vector)
    return 64;
  if (ST->hasVector())
    return 128;
  return 0;
}

bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
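  // SystemZ divide instructions (e.g. dsgr/dlgr) compute quotient and
  // remainder together in an even/odd register pair, so a div + rem pair
  // on a legal scalar integer type is effectively a single operation.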
  EVT VT = TLI->getValueType(DL, DataType);
  return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
}

int SystemZTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {

  // TODO: return a good value for BB-VECTORIZER that includes the
  // immediate loads, which we do not want to count for the loop
  // vectorizer, since they are hopefully hoisted out of the loop. This
  // would require a new parameter 'InLoop', but not sure if constant
  // args are common enough to motivate this.

  unsigned ScalarBits = Ty->getScalarSizeInBits();

  // Div with a constant which is a power of 2 will be converted by
  // DAGCombiner to use shifts. With vector shift-element instructions, a
  // vector sdiv costs about as much as a scalar one.
  const unsigned SDivCostEstimate = 4;
  bool SDivPow2 = false;
  bool UDivPow2 = false;
  if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv) &&
      Args.size() == 2) {
    const ConstantInt *CI = nullptr;
    if (const Constant *C = dyn_cast<Constant>(Args[1])) {
      if (C->getType()->isVectorTy())
        CI = dyn_cast_or_null<const ConstantInt>(C->getSplatValue());
      else
        CI = dyn_cast<const ConstantInt>(C);
    }
    if (CI != nullptr &&
        (CI->getValue().isPowerOf2() || (-CI->getValue()).isPowerOf2())) {
      if (Opcode == Instruction::SDiv)
        SDivPow2 = true;
      else
        UDivPow2 = true;
    }
  }

  if (Ty->isVectorTy()) {
    assert (ST->hasVector() && "getArithmeticInstrCost() called with vector type.");
    unsigned VF = Ty->getVectorNumElements();
    unsigned NumVectors = getNumberOfParts(Ty);

    // These vector operations are custom handled, but are still supported
    // with one instruction per vector, regardless of element size.
    if (Opcode == Instruction::Shl || Opcode == Instruction::LShr ||
        Opcode == Instruction::AShr || UDivPow2) {
      return NumVectors;
    }

    if (SDivPow2)
      return (NumVectors * SDivCostEstimate);

    // These FP operations are supported with a single vector instruction for
    // double (base implementation assumes float generally costs 2). For
    // FP128, the scalar cost is 1, and there is no overhead since the values
    // are already in scalar registers.
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv) {
      switch (ScalarBits) {
      case 32: {
        // The vector enhancements facility 1 provides v4f32 instructions.
        if (ST->hasVectorEnhancements1())
          return NumVectors;
        // Return the cost of multiple scalar invocations plus the cost of
        // inserting and extracting the values.
        unsigned ScalarCost = getArithmeticInstrCost(Opcode, Ty->getScalarType());
        unsigned Cost = (VF * ScalarCost) + getScalarizationOverhead(Ty, Args);
        // FIXME: VF 2 for these FP operations is currently just as
        // expensive as for VF 4.
        if (VF == 2)
          Cost *= 2;
        return Cost;
      }
      case 64:
      case 128:
        return NumVectors;
      default:
        break;
      }
    }

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem) {
      unsigned Cost = (VF * LIBCALL_COST) + getScalarizationOverhead(Ty, Args);
      // FIXME: VF 2 for float is currently just as expensive as for VF 4.
      if (VF == 2 && ScalarBits == 32)
        Cost *= 2;
      return Cost;
    }
  }
  else {  // Scalar:
    // These FP operations are supported with a dedicated instruction for
    // float, double and fp128 (base implementation assumes float generally
    // costs 2).
    if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub ||
        Opcode == Instruction::FMul || Opcode == Instruction::FDiv)
      return 1;

    // There is no native support for FRem.
    if (Opcode == Instruction::FRem)
      return LIBCALL_COST;

    if (Opcode == Instruction::LShr || Opcode == Instruction::AShr)
      return (ScalarBits >= 32 ? 1 : 2 /*ext*/);

    // Or requires one instruction, although it has custom handling for i64.
    if (Opcode == Instruction::Or)
      return 1;

    if (Opcode == Instruction::Xor && ScalarBits == 1)
      // 2 * ipm sequences ; xor ; shift ; compare
      return 7;

    if (UDivPow2)
      return 1;
    if (SDivPow2)
      return SDivCostEstimate;

    // An extra extension for narrow types is needed.
    if ((Opcode == Instruction::SDiv || Opcode == Instruction::SRem))
      // sext of op(s) for narrow types
      return (ScalarBits < 32 ? 4 : (ScalarBits == 32 ? 2 : 1));

    if (Opcode == Instruction::UDiv || Opcode == Instruction::URem)
      // Clearing of low 64 bit reg + sext of op(s) for narrow types + dl[g]r
      return (ScalarBits < 32 ? 4 : 2);
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args);
}


int SystemZTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                   Type *SubTp) {
  assert (Tp->isVectorTy());
  assert (ST->hasVector() && "getShuffleCost() called with vector type.");
  unsigned NumVectors = getNumberOfParts(Tp);

  // TODO: Since fp32 is expanded, the shuffle cost should always be 0.

  // FP128 values are always in scalar registers, so there is no work
  // involved with a shuffle, except for broadcast. In that case register
  // moves are done with a single instruction per element.
  if (Tp->getScalarType()->isFP128Ty())
    return (Kind == TargetTransformInfo::SK_Broadcast ? NumVectors - 1 : 0);

  switch (Kind) {
  case TargetTransformInfo::SK_ExtractSubvector:
    // ExtractSubvector Index indicates start offset.

    // Extracting a subvector from the first index is a noop.
    return (Index == 0 ? 0 : NumVectors);

  case TargetTransformInfo::SK_Broadcast:
    // Loop vectorizer calls here to figure out the extra cost of
    // broadcasting a loaded value to all elements of a vector. Since vlrep
    // loads and replicates with a single instruction, adjust the returned
    // value.
    return NumVectors - 1;

  default:

    // SystemZ supports single instruction permutation / replication.
    return NumVectors;
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

// Return the log2 difference of the element sizes of the two vector types.
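// E.g. for element sizes of 64 and 8 bits the difference is
// Log2(64) - Log2(8) = 3.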
static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
  unsigned Bits0 = Ty0->getScalarSizeInBits();
  unsigned Bits1 = Ty1->getScalarSizeInBits();

  if (Bits1 > Bits0)
    return (Log2_32(Bits1) - Log2_32(Bits0));

  return (Log2_32(Bits0) - Log2_32(Bits1));
}

// Return the number of instructions needed to truncate SrcTy to DstTy.
unsigned SystemZTTIImpl::
getVectorTruncCost(Type *SrcTy, Type *DstTy) {
  assert (SrcTy->isVectorTy() && DstTy->isVectorTy());
  assert (SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() &&
          "Packing must reduce size of vector type.");
  assert (SrcTy->getVectorNumElements() == DstTy->getVectorNumElements() &&
          "Packing should not change number of elements.");

  // TODO: Since fp32 is expanded, the extract cost should always be 0.

  unsigned NumParts = getNumberOfParts(SrcTy);
  if (NumParts <= 2)
    // Up to 2 vector registers can be truncated efficiently with pack or
    // permute. The latter requires an immediate mask to be loaded, which
    // typically gets hoisted out of a loop.  TODO: return a good value for
    // BB-VECTORIZER that includes the immediate loads, which we do not want
    // to count for the loop vectorizer.
    return 1;

  unsigned Cost = 0;
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  unsigned VF = SrcTy->getVectorNumElements();
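  // Each of the Log2Diff pack steps halves the number of live parts (down to
  // one) and costs one instruction per remaining part. E.g. v8i64 -> v8i8:
  // 4 source parts give 2 + 1 + 1 = 4, reduced to 3 by the special case below.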
  for (unsigned P = 0; P < Log2Diff; ++P) {
    if (NumParts > 1)
      NumParts /= 2;
    Cost += NumParts;
  }

  // Currently, a general mix of permutes and pack instructions is output by
  // isel, which follows the cost computation above except for this case,
  // which is one instruction less:
  if (VF == 8 && SrcTy->getScalarSizeInBits() == 64 &&
      DstTy->getScalarSizeInBits() == 8)
    Cost--;

  return Cost;
}

// Return the cost of converting a vector bitmask produced by a compare
// (SrcTy), to the type of the select or extend instruction (DstTy).
unsigned SystemZTTIImpl::
getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) {
  assert (SrcTy->isVectorTy() && DstTy->isVectorTy() &&
          "Should only be called with vector types.");

  unsigned PackCost = 0;
  unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBits = DstTy->getScalarSizeInBits();
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
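  // E.g. widening a v4i32 compare mask for a v4i64 select needs one unpack
  // for each of the two destination parts plus one mask move: 2 + 1 = 3.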
  if (SrcScalarBits > DstScalarBits)
    // The bitmask will be truncated.
    PackCost = getVectorTruncCost(SrcTy, DstTy);
  else if (SrcScalarBits < DstScalarBits) {
    unsigned DstNumParts = getNumberOfParts(DstTy);
    // Each vector select needs its part of the bitmask unpacked.
    PackCost = Log2Diff * DstNumParts;
    // Extra cost for moving part of mask before unpacking.
    PackCost += DstNumParts - 1;
  }

  return PackCost;
}

// Return the type of the compared operands. This is needed to compute the
// cost for a Select / ZExt or SExt instruction.
static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
  Type *OpTy = nullptr;
  if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
    OpTy = CI->getOperand(0)->getType();
  else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
    if (LogicI->getNumOperands() == 2)
      if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
        if (isa<CmpInst>(LogicI->getOperand(1)))
          OpTy = CI0->getOperand(0)->getType();

  if (OpTy != nullptr) {
    if (VF == 1) {
      assert (!OpTy->isVectorTy() && "Expected scalar type");
      return OpTy;
    }
    // Return the potentially vectorized type based on 'I' and 'VF'.  'I' may
    // be either scalar or already vectorized with the same or a lesser VF.
    Type *ElTy = OpTy->getScalarType();
    return VectorType::get(ElTy, VF);
  }

  return nullptr;
}

int SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     const Instruction *I) {
  unsigned DstScalarBits = Dst->getScalarSizeInBits();
  unsigned SrcScalarBits = Src->getScalarSizeInBits();

  if (Src->isVectorTy()) {
    assert (ST->hasVector() && "getCastInstrCost() called with vector type.");
    assert (Dst->isVectorTy());
    unsigned VF = Src->getVectorNumElements();
    unsigned NumDstVectors = getNumberOfParts(Dst);
    unsigned NumSrcVectors = getNumberOfParts(Src);

    if (Opcode == Instruction::Trunc) {
      if (Src->getScalarSizeInBits() == Dst->getScalarSizeInBits())
        return 0; // Check for NOOP conversions.
      return getVectorTruncCost(Src, Dst);
    }

    if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
      if (SrcScalarBits >= 8) {
        // ZExt/SExt will be handled with one unpack per doubling of width.
        unsigned NumUnpacks = getElSizeLog2Diff(Src, Dst);

        // For types that span multiple vector registers, some additional
        // instructions are used to set up the unpacking.
        unsigned NumSrcVectorOps =
          (NumUnpacks > 1 ? (NumDstVectors - NumSrcVectors)
                          : (NumDstVectors / 2));

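        // E.g. v16i8 -> v16i32: two unpack steps across four destination
        // vectors plus three setup operations: 2 * 4 + 3 = 11.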
        return (NumUnpacks * NumDstVectors) + NumSrcVectorOps;
      }
      else if (SrcScalarBits == 1) {
        // This should be an extension of a compare i1 result.
        // If we know the widths of the compared operands, get the cost of
        // converting the mask to Dst. Otherwise assume the same widths.
        unsigned Cost = 0;
        Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
        if (CmpOpTy != nullptr)
          Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
        if (Opcode == Instruction::ZExt)
          // One 'vn' per dst vector with an immediate mask.
          Cost += NumDstVectors;
        return Cost;
      }
    }

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP ||
        Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI) {
      // TODO: Fix base implementation which could simplify things a bit here
      // (seems to miss on differentiating on scalar/vector types).

      // Only 64 bit vector conversions are natively supported.
      if (SrcScalarBits == 64 && DstScalarBits == 64)
        return NumDstVectors;

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values. The base implementation does
      // not realize that float->int gets scalarized.
      unsigned ScalarCost = getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());
      unsigned TotCost = VF * ScalarCost;
      bool NeedsInserts = true, NeedsExtracts = true;
      // FP128 registers do not get inserted or extracted.
      if (DstScalarBits == 128 &&
          (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP))
        NeedsInserts = false;
      if (SrcScalarBits == 128 &&
          (Opcode == Instruction::FPToSI || Opcode == Instruction::FPToUI))
        NeedsExtracts = false;

      TotCost += getScalarizationOverhead(Dst, NeedsInserts, NeedsExtracts);

      // FIXME: VF 2 for float<->i32 is currently just as expensive as for VF 4.
      if (VF == 2 && SrcScalarBits == 32 && DstScalarBits == 32)
        TotCost *= 2;

      return TotCost;
    }

    if (Opcode == Instruction::FPTrunc) {
      if (SrcScalarBits == 128)  // fp128 -> double/float + inserts of elements.
        return VF /*ldxbr/lexbr*/ + getScalarizationOverhead(Dst, true, false);
      else // double -> float
        return VF / 2 /*vledb*/ + std::max(1U, VF / 4 /*vperm*/);
    }

    if (Opcode == Instruction::FPExt) {
      if (SrcScalarBits == 32 && DstScalarBits == 64) {
        // float -> double is very rare and currently unoptimized. Instead of
        // using vldeb, which can do two at a time, all conversions are
        // scalarized.
        return VF * 2;
      }
      // -> fp128.  VF * lxdb/lxeb + extraction of elements.
      return VF + getScalarizationOverhead(Src, false, true);
    }
  }
  else { // Scalar
    assert (!Dst->isVectorTy());

    if (Opcode == Instruction::SIToFP || Opcode == Instruction::UIToFP)
      return (SrcScalarBits >= 32 ? 1 : 2 /*i8/i16 extend*/);

    if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
        Src->isIntegerTy(1)) {
      // This should be an extension of a compare i1 result, which is done
      // with ipm and a varying sequence of instructions.
      unsigned Cost = 0;
      if (Opcode == Instruction::SExt)
        Cost = (DstScalarBits < 64 ? 3 : 4);
      if (Opcode == Instruction::ZExt)
        Cost = 3;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I) : nullptr);
      if (CmpOpTy != nullptr && CmpOpTy->isFloatingPointTy())
        // If operands of fp type were compared, this costs +1.
        Cost++;

      return Cost;
    }
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
}

int SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                       const Instruction *I) {
  if (ValTy->isVectorTy()) {
    assert (ST->hasVector() && "getCmpSelInstrCost() called with vector type.");
    unsigned VF = ValTy->getVectorNumElements();

    // Called with a compare instruction.
    if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
      unsigned PredicateExtraCost = 0;
      if (I != nullptr) {
        // Some predicates cost one or two extra instructions.
        switch (cast<CmpInst>(I)->getPredicate()) {
        case CmpInst::Predicate::ICMP_NE:
        case CmpInst::Predicate::ICMP_UGE:
        case CmpInst::Predicate::ICMP_ULE:
        case CmpInst::Predicate::ICMP_SGE:
        case CmpInst::Predicate::ICMP_SLE:
          PredicateExtraCost = 1;
          break;
        case CmpInst::Predicate::FCMP_ONE:
        case CmpInst::Predicate::FCMP_ORD:
        case CmpInst::Predicate::FCMP_UEQ:
        case CmpInst::Predicate::FCMP_UNO:
          PredicateExtraCost = 2;
          break;
        default:
          break;
        }
      }

      // Float is handled with 2*vmr[lh]f + 2*vldeb + vfchdb for each pair of
      // floats.  FIXME: <2 x float> generates same code as <4 x float>.
      unsigned CmpCostPerVector = (ValTy->getScalarType()->isFloatTy() ? 10 : 1);
      unsigned NumVecs_cmp = getNumberOfParts(ValTy);

      unsigned Cost = (NumVecs_cmp * (CmpCostPerVector + PredicateExtraCost));
      return Cost;
    }
    else { // Called with a select instruction.
      assert (Opcode == Instruction::Select);

      // We can figure out the extra cost of packing / unpacking if the
      // instruction was passed and the compare instruction is found.
      unsigned PackCost = 0;
      Type *CmpOpTy = ((I != nullptr) ? getCmpOpsType(I, VF) : nullptr);
      if (CmpOpTy != nullptr)
        PackCost =
          getVectorBitmaskConversionCost(CmpOpTy, ValTy);

      return getNumberOfParts(ValTy) /*vsel*/ + PackCost;
    }
  }
  else { // Scalar
    switch (Opcode) {
    case Instruction::ICmp: {
      unsigned Cost = 1;
      if (ValTy->isIntegerTy() && ValTy->getScalarSizeInBits() <= 16)
        Cost += 2; // extend both operands
      return Cost;
    }
    case Instruction::Select:
      if (ValTy->isFloatingPointTy())
        return 4; // No load on condition for FP, so this costs a conditional jump.
      return 1; // Load On Condition.
    }
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, nullptr);
}

int SystemZTTIImpl::
getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  // vlvgp will insert two GPRs into a vector register, so only count half the
  // number of instructions.
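  // Charge only the even lane of each pair: one vlvgp covers lanes 2k and
  // 2k+1.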
  if (Opcode == Instruction::InsertElement && Val->isIntOrIntVectorTy(64))
    return ((Index % 2 == 0) ? 1 : 0);

  if (Opcode == Instruction::ExtractElement) {
    int Cost = ((Val->getScalarSizeInBits() == 1) ? 2 /*+test-under-mask*/ : 1);

    // Give a slight penalty for moving out of vector pipeline to FXU unit.
    if (Index == 0 && Val->isIntOrIntVectorTy())
      Cost += 1;

    return Cost;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

int SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment, unsigned AddressSpace,
                                    const Instruction *I) {
  assert(!Src->isVoidTy() && "Invalid type");

  if (!Src->isVectorTy() && Opcode == Instruction::Load &&
      I != nullptr && I->hasOneUse()) {
      const Instruction *UserI = cast<Instruction>(*I->user_begin());
      unsigned Bits = Src->getScalarSizeInBits();
      bool FoldsLoad = false;
      switch (UserI->getOpcode()) {
      case Instruction::ICmp:
      case Instruction::Add:
      case Instruction::Sub:
      case Instruction::Mul:
      case Instruction::SDiv:
      case Instruction::UDiv:
      case Instruction::And:
      case Instruction::Or:
      case Instruction::Xor:
      // This also makes sense for float operations, but disabled for now due
      // to regressions.
      // case Instruction::FCmp:
      // case Instruction::FAdd:
      // case Instruction::FSub:
      // case Instruction::FMul:
      // case Instruction::FDiv:
        FoldsLoad = (Bits == 32 || Bits == 64);
        break;
      }

      if (FoldsLoad) {
        assert (UserI->getNumOperands() == 2 &&
                "Expected to only handle binops.");

        // UserI can't fold two loads, so in that case return 0 cost only
        // half of the time.
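        // (Treat the load at operand 0 as the folded one, so a load feeding
        // operand 1 still costs 1 when both operands are one-use loads.)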
        for (unsigned i = 0; i < 2; ++i) {
          if (UserI->getOperand(i) == I)
            continue;
          if (LoadInst *LI = dyn_cast<LoadInst>(UserI->getOperand(i))) {
            if (LI->hasOneUse())
              return i == 0;
          }
        }

        return 0;
      }
  }

  unsigned NumOps = getNumberOfParts(Src);

  if (Src->getScalarSizeInBits() == 128)
    // 128-bit scalars are held in a pair of 64-bit registers.
    NumOps *= 2;

  return NumOps;
}

int SystemZTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace) {
  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  unsigned WideBits = (VecTy->isPtrOrPtrVectorTy() ?
     (64U * VecTy->getVectorNumElements()) : VecTy->getPrimitiveSizeInBits());
  assert (WideBits > 0 && "Could not compute size of vector");
  int NumWideParts =
    ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));

  // How many source vectors are handled to produce a vectorized operand?
  int NumElsPerVector = (VecTy->getVectorNumElements() / NumWideParts);
  int NumSrcParts =
    ((NumWideParts > NumElsPerVector) ? NumElsPerVector : NumWideParts);

  // A load group may have gaps.
  unsigned NumOperands =
    ((Opcode == Instruction::Load) ? Indices.size() : Factor);

  // Each needed permute takes two vectors as input.
  if (NumSrcParts > 1)
    NumSrcParts--;
  int NumPermutes = NumSrcParts * NumOperands;
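  // E.g. a factor-4 load of <16 x i32> with all indices used: 4 wide parts
  // and (4 - 1) * 4 = 12 permutes, for a total cost of 16.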

  // Cost of load/store operations and the permutations needed.
  return NumWideParts + NumPermutes;
}
    909