//===- BasicTargetTransformInfo.cpp - Basic target-independent TTI impl ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the implementation of a basic TargetTransformInfo pass
/// predicated on the target abstractions present in the target independent
/// code generator. It uses these (primarily TargetLowering) to model as much
/// of the TTI query interface as possible. It is included by most targets so
/// that they can specialize only a small subset of the query space.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <utility>

using namespace llvm;

static cl::opt<unsigned>
PartialUnrollingThreshold("partial-unrolling-threshold", cl::init(0),
                          cl::desc("Threshold for partial unrolling"),
                          cl::Hidden);

#define DEBUG_TYPE "basictti"

namespace {

class BasicTTI final : public ImmutablePass, public TargetTransformInfo {
  const TargetMachine *TM;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

  /// Estimate the cost overhead of an SK_Alternate shuffle.
  unsigned getAltShuffleOverhead(Type *Ty) const;

  const TargetLoweringBase *getTLI() const { return TM->getTargetLowering(); }

public:
  BasicTTI() : ImmutablePass(ID), TM(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  BasicTTI(const TargetMachine *TM) : ImmutablePass(ID), TM(TM) {
    initializeBasicTTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override {
    pushTTIStack(this);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
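  /// BasicTTI inherits from both ImmutablePass and TargetTransformInfo, so an
  /// analysis-group lookup of TargetTransformInfo needs a pointer adjusted to
  /// the correct base subobject.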
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  bool hasBranchDivergence() const override;

  /// \name Scalar TTI Implementations
  /// @{

  bool isLegalAddImmediate(int64_t imm) const override;
  bool isLegalICmpImmediate(int64_t imm) const override;
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                             int64_t BaseOffset, bool HasBaseReg,
                             int64_t Scale) const override;
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                           int64_t BaseOffset, bool HasBaseReg,
                           int64_t Scale) const override;
  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTypeLegal(Type *Ty) const override;
  unsigned getJumpBufAlignment() const override;
  unsigned getJumpBufSize() const override;
  bool shouldBuildLookupTables() const override;
  bool haveFastSqrt(Type *Ty) const override;
  void getUnrollingPreferences(Loop *L,
                               UnrollingPreferences &UP) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override;
  unsigned getMaximumUnrollFactor() const override;
  unsigned getRegisterBitWidth(bool Vector) const override;
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind) const override;
  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;
  unsigned getCFInstrCost(unsigned Opcode) const override;
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;
  unsigned getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
                                 ArrayRef<Type*> Tys) const override;
  unsigned getNumberOfParts(Type *Tp) const override;
  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const override;
  unsigned getReductionCost(unsigned Opcode, Type *Ty,
                            bool IsPairwise) const override;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(BasicTTI, TargetTransformInfo, "basictti",
                   "Target independent code generator's TTI", true, true, false)
char BasicTTI::ID = 0;

ImmutablePass *
llvm::createBasicTargetTransformInfoPass(const TargetMachine *TM) {
  return new BasicTTI(TM);
}

bool BasicTTI::hasBranchDivergence() const { return false; }

bool BasicTTI::isLegalAddImmediate(int64_t imm) const {
  return getTLI()->isLegalAddImmediate(imm);
}

bool BasicTTI::isLegalICmpImmediate(int64_t imm) const {
  return getTLI()->isLegalICmpImmediate(imm);
}

bool BasicTTI::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->isLegalAddressingMode(AM, Ty);
}

int BasicTTI::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
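  // Per TargetLoweringBase::AddrMode, the mode being costed has the form
  //   BaseGV + BaseOffs + BaseReg + Scale*ScaleReg,
  // so Scale is the multiplier applied to the index register.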
  AM.Scale = Scale;
  return getTLI()->getScalingFactorCost(AM, Ty);
}

bool BasicTTI::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return getTLI()->isTruncateFree(Ty1, Ty2);
}

bool BasicTTI::isTypeLegal(Type *Ty) const {
  EVT T = getTLI()->getValueType(Ty);
  return getTLI()->isTypeLegal(T);
}

unsigned BasicTTI::getJumpBufAlignment() const {
  return getTLI()->getJumpBufAlignment();
}

unsigned BasicTTI::getJumpBufSize() const {
  return getTLI()->getJumpBufSize();
}

bool BasicTTI::shouldBuildLookupTables() const {
  const TargetLoweringBase *TLI = getTLI();
  return TLI->supportJumpTables() &&
      (TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
       TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
}

bool BasicTTI::haveFastSqrt(Type *Ty) const {
  const TargetLoweringBase *TLI = getTLI();
  EVT VT = TLI->getValueType(Ty);
  return TLI->isTypeLegal(VT) && TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
}

void BasicTTI::getUnrollingPreferences(Loop *L,
                                       UnrollingPreferences &UP) const {
  // This unrolling functionality is target independent, but to provide some
  // motivation for its intended use, for x86:

  // According to the Intel 64 and IA-32 Architectures Optimization Reference
  // Manual, Intel Core models and later have a loop stream detector (and
  // associated uop queue) that can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have no more than 4 (8 for Nehalem and later) branches
  //    taken, and none of them may be calls.
  //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

  // According to the Software Optimization Guide for AMD Family 15h
  // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
  // and loop buffer which can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have fewer than 16 branches.
  //  - The loop must have fewer than 40 uops in all executed loop branches.

  // The number of taken branches in a loop is hard to estimate here, and
  // benchmarking has revealed that it is better not to be conservative when
  // estimating the branch count. As a result, we'll ignore the branch limits
  // until someone finds a case where it matters in practice.

  unsigned MaxOps;
  const TargetSubtargetInfo *ST = &TM->getSubtarget<TargetSubtargetInfo>();
  if (PartialUnrollingThreshold.getNumOccurrences() > 0)
    MaxOps = PartialUnrollingThreshold;
  else if (ST->getSchedModel()->LoopMicroOpBufferSize > 0)
    MaxOps = ST->getSchedModel()->LoopMicroOpBufferSize;
  else
    return;

  // Scan the loop: don't unroll loops with calls.
  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I) {
    BasicBlock *BB = *I;

    for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
      if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
        ImmutableCallSite CS(J);
        if (const Function *F = CS.getCalledFunction()) {
          if (!TopTTI->isLoweredToCall(F))
            continue;
        }

        return;
      }
  }

  // Enable runtime and partial unrolling up to the specified size.
  UP.Partial = UP.Runtime = true;
  UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
}

//===----------------------------------------------------------------------===//
//
// Calls used by the vectorizers.
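// These provide conservative default costs derived from the target's
// type-legalization information in TargetLowering.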
//
//===----------------------------------------------------------------------===//

unsigned BasicTTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                            bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned BasicTTI::getNumberOfRegisters(bool Vector) const {
  return 1;
}

unsigned BasicTTI::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned BasicTTI::getMaximumUnrollFactor() const {
  return 1;
}

unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                          OperandValueKind,
                                          OperandValueKind) const {
  // Check if any of the operands are vector operands.
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
  // Assume that floating point arithmetic operations cost twice as much as
  // integer operations.
  unsigned OpCost = (IsFloat ? 2 : 1);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2 * OpCost;
    return LT.first * 1 * OpCost;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume that the code is twice
    // as expensive.
    return LT.first * 2 * OpCost;
  }

  // Else, assume that we need to scalarize this op.
  if (Ty->isVectorTy()) {
    unsigned Num = Ty->getVectorNumElements();
    unsigned Cost = TopTTI->getArithmeticInstrCost(Opcode, Ty->getScalarType());
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Ty, true, true) + Num * Cost;
  }

  // We don't know anything about this scalar instruction.
  return OpCost;
}

unsigned BasicTTI::getAltShuffleOverhead(Type *Ty) const {
  assert(Ty->isVectorTy() && "Can only shuffle vectors");
  unsigned Cost = 0;
  // The shuffle cost is equal to the cost of extracting each element from its
  // source argument plus the cost of inserting it into the result vector.

  // E.g., an alternate shuffle of two <4 x float> vectors has the mask
  // <0,5,2,7>, i.e. we need to extract index 0 of the first vector, index 1
  // of the second vector, index 2 of the first vector, and finally index 3 of
  // the second vector, and insert them at indices <0,1,2,3> of the result
  // vector.
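  // For that <4 x float> example, the loop below sums the costs of four
  // extracts and four inserts; on a target where each such operation costs 1,
  // the reported alternate-shuffle cost would be 8.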
  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }
  return Cost;
}

unsigned BasicTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                  Type *SubTp) const {
  if (Kind == SK_Alternate) {
    return getAltShuffleOverhead(Tp);
  }
  return 1;
}

unsigned BasicTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);

  // Check for NOOP conversions.
  if (SrcLT.first == DstLT.first &&
      SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

    // Bitcasts between types that are legalized to the same type are free.
    if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
      return 0;
  }

  if (Opcode == Instruction::Trunc &&
      TLI->isTruncateFree(SrcLT.second, DstLT.second))
    return 0;

  if (Opcode == Instruction::ZExt &&
      TLI->isZExtFree(SrcLT.second, DstLT.second))
    return 0;

  // If the cast is marked as legal (or promote) then assume low cost.
  if (SrcLT.first == DstLT.first &&
      TLI->isOperationLegalOrPromote(ISD, DstLT.second))
    return 1;

  // Handle scalar conversions.
  if (!Src->isVectorTy() && !Dst->isVectorTy()) {

    // Scalar bitcasts are usually free.
    if (Opcode == Instruction::BitCast)
      return 0;

    // Just check the op cost. If the operation is legal then assume it
    // costs 1.
    if (!TLI->isOperationExpand(ISD, DstLT.second))
      return 1;

    // Assume that illegal scalar instructions are expensive.
    return 4;
  }

  // Check vector-to-vector casts.
  if (Dst->isVectorTy() && Src->isVectorTy()) {

    // If the cast is between same-sized registers, then the check is simple.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Assume that Zext is done using AND.
      if (Opcode == Instruction::ZExt)
        return 1;

      // Assume that sext is done using SHL and SRA.
      if (Opcode == Instruction::SExt)
        return 2;

      // Just check the op cost. If the operation is legal then assume it
      // costs 1 and multiply by the type-legalization overhead.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return SrcLT.first * 1;
    }

    // If we are converting vectors and the operation is illegal, or if the
    // vectors are legalized to different types, estimate the scalarization
    // costs.
    unsigned Num = Dst->getVectorNumElements();
    unsigned Cost = TopTTI->getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Dst, true, true) + Num * Cost;
  }

  // We already handled vector-to-vector and scalar-to-scalar conversions.
  // This is where we handle bitcasts between vectors and scalars. We need to
  // assume that the conversion is scalarized in one way or another.
  if (Opcode == Instruction::BitCast)
    // Illegal bitcasts are done by storing and loading from a stack slot.
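    // Only the side that is actually a vector needs to be decomposed or
    // rebuilt; the scalar side contributes no scalarization cost.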
    return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
                              : 0) +
           (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
                              : 0);

  llvm_unreachable("Unhandled cast");
}

unsigned BasicTTI::getCFInstrCost(unsigned Opcode) const {
  // Branches are assumed to be predicted.
  return 0;
}

unsigned BasicTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Selects on vectors are actually vector selects.
  if (ISD == ISD::SELECT) {
    assert(CondTy && "CondTy must exist");
    if (CondTy->isVectorTy())
      ISD = ISD::VSELECT;
  }

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1. Multiply
    // by the type-legalization overhead.
    return LT.first * 1;
  }

  // Otherwise, assume that the operation is scalarized.
  if (ValTy->isVectorTy()) {
    unsigned Num = ValTy->getVectorNumElements();
    if (CondTy)
      CondTy = CondTy->getScalarType();
    unsigned Cost = TopTTI->getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
                                               CondTy);

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
  }

  // Unknown scalar opcode.
  return 1;
}

unsigned BasicTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const {
  std::pair<unsigned, MVT> LT =
      getTLI()->getTypeLegalizationCost(Val->getScalarType());

  return LT.first;
}

unsigned BasicTTI::getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const {
  assert(!Src->isVoidTy() && "Invalid type");
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src);

  // Assume that all loads of legal types cost 1.
  unsigned Cost = LT.first;

  if (Src->isVectorTy() &&
      Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
    // This is a vector load that legalizes to a larger type than the vector
    // itself. Unless the corresponding extending load or truncating store is
    // legal, this will scalarize.
    TargetLowering::LegalizeAction LA = TargetLowering::Expand;
    EVT MemVT = getTLI()->getValueType(Src, true);
    if (MemVT.isSimple() && MemVT != MVT::Other) {
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT());
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, MemVT.getSimpleVT());
    }

    if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
      // This is a vector load/store for some illegal type that is scalarized.
      // We must account for the cost of building or decomposing the vector.
      Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                       Opcode == Instruction::Store);
    }
  }

  return Cost;
}

unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                         ArrayRef<Type *> Tys) const {
  unsigned ISD = 0;
  switch (IID) {
  default: {
    // Assume that we need to scalarize this intrinsic.
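    // Model it as one scalar call per vector lane, plus the overhead of
    // extracting each vector argument's lanes and inserting the lanes of the
    // vector result.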
    unsigned ScalarizationCost = 0;
    unsigned ScalarCalls = 1;
    if (RetTy->isVectorTy()) {
      ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
    }
    for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
      if (Tys[i]->isVectorTy()) {
        ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
        ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
      }
    }

    return ScalarCalls + ScalarizationCost;
  }
  // Look for intrinsics that can be lowered directly or turned into a scalar
  // intrinsic call.
  case Intrinsic::sqrt:      ISD = ISD::FSQRT;      break;
  case Intrinsic::sin:       ISD = ISD::FSIN;       break;
  case Intrinsic::cos:       ISD = ISD::FCOS;       break;
  case Intrinsic::exp:       ISD = ISD::FEXP;       break;
  case Intrinsic::exp2:      ISD = ISD::FEXP2;      break;
  case Intrinsic::log:       ISD = ISD::FLOG;       break;
  case Intrinsic::log10:     ISD = ISD::FLOG10;     break;
  case Intrinsic::log2:      ISD = ISD::FLOG2;      break;
  case Intrinsic::fabs:      ISD = ISD::FABS;       break;
  case Intrinsic::copysign:  ISD = ISD::FCOPYSIGN;  break;
  case Intrinsic::floor:     ISD = ISD::FFLOOR;     break;
  case Intrinsic::ceil:      ISD = ISD::FCEIL;      break;
  case Intrinsic::trunc:     ISD = ISD::FTRUNC;     break;
  case Intrinsic::nearbyint: ISD = ISD::FNEARBYINT; break;
  case Intrinsic::rint:      ISD = ISD::FRINT;      break;
  case Intrinsic::round:     ISD = ISD::FROUND;     break;
  case Intrinsic::pow:       ISD = ISD::FPOW;       break;
  case Intrinsic::fma:       ISD = ISD::FMA;        break;
  case Intrinsic::fmuladd:   ISD = ISD::FMA;        break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    return 0;
  }

  const TargetLoweringBase *TLI = getTLI();
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2;
    return LT.first * 1;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume that the code is twice
    // as expensive.
    return LT.first * 2;
  }

  // If we can't lower fmuladd into an FMA, estimate the cost as a floating
  // point mul followed by an add.
  if (IID == Intrinsic::fmuladd)
    return TopTTI->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
           TopTTI->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

  // Else, assume that we need to scalarize this intrinsic. For math builtins
  // this will emit a costly libcall, adding call overhead and spills. Make it
  // very expensive.
  if (RetTy->isVectorTy()) {
    unsigned Num = RetTy->getVectorNumElements();
    unsigned Cost = TopTTI->getIntrinsicInstrCost(IID, RetTy->getScalarType(),
                                                  Tys);
    return 10 * Cost * Num;
  }

  // This is going to be turned into a library call, make it expensive.
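  // The constant mirrors the per-call libcall factor used for the vector
  // case above.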
  return 10;
}

unsigned BasicTTI::getNumberOfParts(Type *Tp) const {
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Tp);
  return LT.first;
}

unsigned BasicTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  return 0;
}

unsigned BasicTTI::getReductionCost(unsigned Opcode, Type *Ty,
                                    bool IsPairwise) const {
  assert(Ty->isVectorTy() && "Expect a vector type");
  unsigned NumVecElts = Ty->getVectorNumElements();
  unsigned NumReduxLevels = Log2_32(NumVecElts);
  unsigned ArithCost = NumReduxLevels *
      TopTTI->getArithmeticInstrCost(Opcode, Ty);
  // Assume the pairwise shuffles add a cost.
  unsigned ShuffleCost =
      NumReduxLevels * (IsPairwise + 1) *
      TopTTI->getShuffleCost(SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
  return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
}
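// A sketch of typical use, not part of this file: a TargetMachine subclass's
// addAnalysisPasses() override registers this pass so that TTI queries fall
// back to this code-generator model ("MyTargetMachine" is hypothetical):
//
//   void MyTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
//     PM.add(createBasicTargetTransformInfoPass(this));
//   }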