//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides helpers for the implementation of
/// a TargetTransformInfo-conforming class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H

#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"

namespace llvm {

/// \brief Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
  typedef TargetTransformInfo TTI;

  const DataLayout &DL;

  explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
      : DL(Arg.DL) {}
  TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}

  const DataLayout &getDataLayout() const { return DL; }

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    switch (Opcode) {
    default:
      // By default, just classify everything as 'basic'.
      return TTI::TCC_Basic;

    case Instruction::GetElementPtr:
      llvm_unreachable("Use getGEPCost for GEP operations!");

    case Instruction::BitCast:
      assert(OpTy && "Cast instructions must provide the operand type");
      if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
        // Identity and pointer-to-pointer casts are free.
        return TTI::TCC_Free;

      // Otherwise, the default basic cost is used.
      return TTI::TCC_Basic;

    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem:
      return TTI::TCC_Expensive;

    case Instruction::IntToPtr: {
      // An inttoptr cast is free so long as the input is a legal integer type
      // which doesn't contain values outside the range of a pointer.
      unsigned OpSize = OpTy->getScalarSizeInBits();
      if (DL.isLegalInteger(OpSize) &&
          OpSize <= DL.getPointerTypeSizeInBits(Ty))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::PtrToInt: {
      // A ptrtoint cast is free so long as the result is a legal integer type
      // that is large enough to store the pointer.
      unsigned DestSize = Ty->getScalarSizeInBits();
      if (DL.isLegalInteger(DestSize) &&
          DestSize >= DL.getPointerTypeSizeInBits(OpTy))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::Trunc:
      // trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      if (DL.isLegalInteger(DL.getTypeSizeInBits(Ty)))
        return TTI::TCC_Free;

      return TTI::TCC_Basic;
    }
  }

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    // In the basic model, we just assume that all-constant GEPs will be folded
    // into their uses via addressing modes.
    for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
      if (!isa<Constant>(Operands[Idx]))
        return TTI::TCC_Basic;

    return TTI::TCC_Free;
  }

  unsigned getCallCost(FunctionType *FTy, int NumArgs) {
    assert(FTy && "FunctionType must be provided to this routine.");

    // The target-independent implementation just measures the size of the
    // function by approximating that each argument will take on average one
    // instruction to prepare.

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = FTy->getNumParams();

    return TTI::TCC_Basic * (NumArgs + 1);
  }

  unsigned getInliningThresholdMultiplier() { return 1; }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    switch (IID) {
    default:
      // Intrinsics rarely (if ever) have normal argument setup constraints.
      // Model them as having a basic instruction cost.
      // FIXME: This is wrong for libc intrinsics.
      return TTI::TCC_Basic;

    case Intrinsic::annotation:
    case Intrinsic::assume:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
    case Intrinsic::experimental_gc_result:
    case Intrinsic::experimental_gc_relocate:
    case Intrinsic::coro_alloc:
    case Intrinsic::coro_begin:
    case Intrinsic::coro_free:
    case Intrinsic::coro_end:
    case Intrinsic::coro_frame:
    case Intrinsic::coro_size:
    case Intrinsic::coro_suspend:
    case Intrinsic::coro_param:
    case Intrinsic::coro_subfn_addr:
      // These intrinsics don't actually represent code after lowering.
      return TTI::TCC_Free;
    }
  }

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  unsigned getFlatAddressSpace() {
    return -1;
  }

  bool isLoweredToCall(const Function *F) {
    // FIXME: These should almost certainly not be handled here, and instead
    // handled with the help of TLI or the target itself. This was largely
    // ported from existing analysis heuristics here so that such refactorings
    // can take place in the future.

    if (F->isIntrinsic())
      return false;

    if (F->hasLocalLinkage() || !F->hasName())
      return true;

    StringRef Name = F->getName();

    // These will all likely lower to a single selection DAG node.
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
        Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
        Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
      return false;

    // These are all likely to be optimized into something smaller.
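    // (For example, LibCallSimplifier can turn pow(x, 2.0) into x * x and ffs
    // into a cttz+select sequence, so modelling these as full calls would
    // overestimate their cost.)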
    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" ||
        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
        Name == "llabs")
      return false;

    return true;
  }

  void getUnrollingPreferences(Loop *, TTI::UnrollingPreferences &) {}

  bool isLegalAddImmediate(int64_t Imm) { return false; }

  bool isLegalICmpImmediate(int64_t Imm) { return false; }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace) {
    // Guess that only reg and reg+reg addressing is allowed. This heuristic is
    // taken from the implementation of LSR.
    return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
  }

  bool isLegalMaskedStore(Type *DataType) { return false; }

  bool isLegalMaskedLoad(Type *DataType) { return false; }

  bool isLegalMaskedScatter(Type *DataType) { return false; }

  bool isLegalMaskedGather(Type *DataType) { return false; }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    // Guess that all legal addressing modes are free.
    if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                              Scale, AddrSpace))
      return 0;
    return -1;
  }

  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) { return true; }

  bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }

  bool isProfitableToHoist(Instruction *I) { return true; }

  bool isTypeLegal(Type *Ty) { return false; }

  unsigned getJumpBufAlignment() { return 0; }

  unsigned getJumpBufSize() { return 0; }

  bool shouldBuildLookupTables() { return true; }
  bool shouldBuildLookupTablesForConstant(Constant *C) { return true; }

  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    return 0;
  }

  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) { return 0; }

  bool supportsEfficientVectorElementLoadStore() { return false; }

  bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }

  bool enableInterleavedAccessVectorization() { return false; }

  bool isFPVectorizationPotentiallyUnsafe() { return false; }

  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth,
                                      unsigned AddressSpace,
                                      unsigned Alignment,
                                      bool *Fast) { return false; }

  TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
    return TTI::PSK_Software;
  }

  bool haveFastSqrt(Type *Ty) { return false; }

  unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }

  int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                            Type *Ty) {
    return 0;
  }

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getNumberOfRegisters(bool Vector) { return 8; }

  unsigned getRegisterBitWidth(bool Vector) { return 32; }

  bool
  shouldConsiderAddressTypePromotion(const Instruction &I,
                                     bool &AllowPromotionWithoutCommonHeader) {
    AllowPromotionWithoutCommonHeader = false;
    return false;
  }

  unsigned getCacheLineSize() { return 0; }

  unsigned getPrefetchDistance() { return 0; }

  unsigned getMinPrefetchStride() { return 1; }

  unsigned getMaxPrefetchIterationsAhead() { return UINT_MAX; }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                  TTI::OperandValueKind Opd1Info,
                                  TTI::OperandValueKind Opd2Info,
                                  TTI::OperandValueProperties Opd1PropInfo,
                                  TTI::OperandValueProperties Opd2PropInfo,
                                  ArrayRef<const Value *> Args) {
    return 1;
  }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
                          Type *SubTp) {
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                            const Instruction *I) { return 1; }

  unsigned getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                    VectorType *VecTy, unsigned Index) {
    return 1;
  }

  unsigned getCFInstrCost(unsigned Opcode) { return 1; }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                              const Instruction *I) {
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    return 1;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace, const Instruction *I) {
    return 1;
  }

  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) {
    return 1;
  }

  unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                                  bool VariableMask,
                                  unsigned Alignment) {
    return 1;
  }

  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
    return 1;
  }

  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Type *> Tys, FastMathFlags FMF,
                                 unsigned ScalarizationCostPassed) {
    return 1;
  }
  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Value *> Args, FastMathFlags FMF,
                                 unsigned VF) {
    return 1;
  }

  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 1;
  }

  unsigned getNumberOfParts(Type *Tp) { return 0; }

  unsigned getAddressComputationCost(Type *Tp, ScalarEvolution *,
                                     const SCEV *) {
    return 0;
  }

  unsigned getReductionCost(unsigned, Type *, bool) { return 1; }

  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
    return false;
  }

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) {
    return nullptr;
  }

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }

  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const { return 128; }

  bool isLegalToVectorizeLoad(LoadInst *LI) const { return true; }

  bool isLegalToVectorizeStore(StoreInst *SI) const { return true; }
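
  // A concrete target would typically constrain the chain hooks below to its
  // vector register width. A minimal sketch of such an override, for a
  // hypothetical target with 128-bit vector memory operations (illustration
  // only, not part of this interface):
  //
  //   bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
  //                                    unsigned Alignment,
  //                                    unsigned AddrSpace) const {
  //     // Reject chains wider than one 128-bit vector register.
  //     return ChainSizeInBytes <= 16;
  //   }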
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const {
    return true;
  }

  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const {
    return true;
  }

  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const {
    return VF;
  }

  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const {
    return VF;
  }

protected:
  // Obtain the minimum required size to hold the value (without the sign).
  // In case of a vector, it returns the minimum required size for one element.
  unsigned minRequiredElementSize(const Value *Val, bool &isSigned) {
    if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
      const auto *VectorValue = cast<Constant>(Val);

      // In case of a vector, we need to pick the maximum of the minimum
      // required sizes of the elements.
      auto *VT = cast<VectorType>(Val->getType());

      // Assume unsigned elements.
      isSigned = false;

      // The maximum required size is the total vector width divided by the
      // number of elements in the vector.
      unsigned MaxRequiredSize = VT->getBitWidth() / VT->getNumElements();

      unsigned MinRequiredSize = 0;
      for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
        if (auto *IntElement =
                dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
          bool signedElement = IntElement->getValue().isNegative();
          // Get the element's minimum required size.
          unsigned ElementMinRequiredSize =
              IntElement->getValue().getMinSignedBits() - 1;
          // If one element is signed then the whole vector is signed.
          isSigned |= signedElement;
          // Remember the maximum required bit width among all the elements.
          MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
        } else {
          // Not an integer constant element; be conservative.
          return MaxRequiredSize;
        }
      }
      return MinRequiredSize;
    }

    if (const auto *CI = dyn_cast<ConstantInt>(Val)) {
      isSigned = CI->getValue().isNegative();
      return CI->getValue().getMinSignedBits() - 1;
    }

    if (const auto *Cast = dyn_cast<SExtInst>(Val)) {
      isSigned = true;
      return Cast->getSrcTy()->getScalarSizeInBits() - 1;
    }

    if (const auto *Cast = dyn_cast<ZExtInst>(Val)) {
      isSigned = false;
      return Cast->getSrcTy()->getScalarSizeInBits();
    }

    isSigned = false;
    return Val->getType()->getScalarSizeInBits();
  }

  bool isStridedAccess(const SCEV *Ptr) {
    return Ptr && isa<SCEVAddRecExpr>(Ptr);
  }

  const SCEVConstant *getConstantStrideStep(ScalarEvolution *SE,
                                            const SCEV *Ptr) {
    if (!isStridedAccess(Ptr))
      return nullptr;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
    return dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*SE));
  }

  bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr,
                                       int64_t MergeDistance) {
    const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
    if (!Step)
      return false;
    APInt StrideVal = Step->getAPInt();
    if (StrideVal.getBitWidth() > 64)
      return false;
    // FIXME: Need to take the absolute value for the negative stride case.
    return StrideVal.getSExtValue() < MergeDistance;
  }
};

/// \brief CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
private:
  typedef TargetTransformInfoImplBase BaseT;

protected:
  explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}

public:
  using BaseT::getCallCost;

  unsigned getCallCost(const Function *F, int NumArgs) {
    assert(F && "A concrete function must be provided to this routine.");

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = F->arg_size();

    if (Intrinsic::ID IID = F->getIntrinsicID()) {
      FunctionType *FTy = F->getFunctionType();
      SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
      return static_cast<T *>(this)
          ->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys);
    }

    if (!static_cast<T *>(this)->isLoweredToCall(F))
      return TTI::TCC_Basic; // Give a basic cost if it will be lowered
                             // directly.

    return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs);
  }

  unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments) {
    // Simply delegate to generic handling of the call.
    // FIXME: We should use instsimplify or something else to catch calls which
    // will constant fold with these arguments.
    return static_cast<T *>(this)->getCallCost(F, Arguments.size());
  }

  using BaseT::getGEPCost;

  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) {
    const GlobalValue *BaseGV = nullptr;
    if (Ptr != nullptr) {
      // TODO: will remove this when pointers have an opaque type.
      assert(Ptr->getType()->getScalarType()->getPointerElementType() ==
                 PointeeType &&
             "explicit pointee type doesn't match operand's pointee type");
      BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
    }
    bool HasBaseReg = (BaseGV == nullptr);
    int64_t BaseOffset = 0;
    int64_t Scale = 0;

    auto GTI = gep_type_begin(PointeeType, Operands);
    Type *TargetType;
    for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
      TargetType = GTI.getIndexedType();
      // We assume that the cost of a scalar GEP with a constant index and the
      // cost of a vector GEP with a splat constant index are the same.
      const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
      if (!ConstIdx)
        if (auto Splat = getSplatValue(*I))
          ConstIdx = dyn_cast<ConstantInt>(Splat);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For structures, the index is always a splat or scalar constant.
        assert(ConstIdx && "Unexpected GEP index");
        uint64_t Field = ConstIdx->getZExtValue();
        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
      } else {
        int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstIdx)
          BaseOffset += ConstIdx->getSExtValue() * ElementSize;
        else {
          // Needs scale register.
          if (Scale != 0)
            // No addressing mode takes two scale registers.
            return TTI::TCC_Basic;
          Scale = ElementSize;
        }
      }
    }

    // Assumes the address space is 0 when Ptr is nullptr.
    unsigned AS =
        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
    if (static_cast<T *>(this)->isLegalAddressingMode(
            TargetType, const_cast<GlobalValue *>(BaseGV), BaseOffset,
            HasBaseReg, Scale, AS))
      return TTI::TCC_Free;
    return TTI::TCC_Basic;
  }

  using BaseT::getIntrinsicCost;

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    // Delegate to the generic intrinsic handling code. This mostly provides an
    // opportunity for targets to (for example) special case the cost of
    // certain intrinsics based on constants used as arguments.
    SmallVector<Type *, 8> ParamTys;
    ParamTys.reserve(Arguments.size());
    for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
      ParamTys.push_back(Arguments[Idx]->getType());
    return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys);
  }

  unsigned getUserCost(const User *U) {
    if (isa<PHINode>(U))
      return TTI::TCC_Free; // Model all PHI nodes as free.

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
      SmallVector<Value *, 4> Indices(GEP->idx_begin(), GEP->idx_end());
      return static_cast<T *>(this)->getGEPCost(
          GEP->getSourceElementType(), GEP->getPointerOperand(), Indices);
    }

    if (auto CS = ImmutableCallSite(U)) {
      const Function *F = CS.getCalledFunction();
      if (!F) {
        // Just use the called value type.
        Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
        return static_cast<T *>(this)
            ->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
      }

      SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
      return static_cast<T *>(this)->getCallCost(F, Arguments);
    }

    if (const CastInst *CI = dyn_cast<CastInst>(U)) {
      // The result of a cmp instruction is often extended (to be used by other
      // cmp instructions, logical or return instructions). These are usually
      // no-ops on most sane targets.
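      // For example, for "%e = zext i1 %c to i32" where %c is an icmp result,
      // the compare typically already produces a zero-extended 0/1 value in a
      // register, so the cast costs nothing.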
      if (isa<CmpInst>(CI->getOperand(0)))
        return TTI::TCC_Free;
    }

    return static_cast<T *>(this)->getOperationCost(
        Operator::getOpcode(U), U->getType(),
        U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
  }
};

} // end namespace llvm

#endif
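
// A typical consumer of this header derives from the CRTP base above (in-tree
// targets usually do so indirectly, via BasicTTIImplBase in
// llvm/CodeGen/BasicTTIImpl.h) and overrides only the hooks it cares about.
// A minimal sketch for a hypothetical target (illustration only):
//
//   class MyTTIImpl final : public TargetTransformInfoImplCRTPBase<MyTTIImpl> {
//     typedef TargetTransformInfoImplCRTPBase<MyTTIImpl> BaseT;
//     friend BaseT;
//
//   public:
//     explicit MyTTIImpl(const DataLayout &DL) : BaseT(DL) {}
//
//     // Report hardware popcount so passes prefer llvm.ctpop over bit tricks.
//     TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth) {
//       return TTI::PSK_FastHardware;
//     }
//   };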