//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides helpers for the implementation of
/// a TargetTransformInfo-conforming class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"

namespace llvm {

/// \brief Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
  typedef TargetTransformInfo TTI;

  const DataLayout *DL;

  explicit TargetTransformInfoImplBase(const DataLayout *DL) : DL(DL) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
      : DL(Arg.DL) {}
  TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg)
      : DL(std::move(Arg.DL)) {}
  TargetTransformInfoImplBase &
  operator=(const TargetTransformInfoImplBase &RHS) {
    DL = RHS.DL;
    return *this;
  }
  TargetTransformInfoImplBase &operator=(TargetTransformInfoImplBase &&RHS) {
    DL = std::move(RHS.DL);
    return *this;
  }

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    switch (Opcode) {
    default:
      // By default, just classify everything as 'basic'.
      return TTI::TCC_Basic;

    case Instruction::GetElementPtr:
      llvm_unreachable("Use getGEPCost for GEP operations!");

    case Instruction::BitCast:
      assert(OpTy && "Cast instructions must provide the operand type");
      if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
        // Identity and pointer-to-pointer casts are free.
        return TTI::TCC_Free;

      // Otherwise, the default basic cost is used.
      return TTI::TCC_Basic;

    case Instruction::IntToPtr: {
      if (!DL)
        return TTI::TCC_Basic;

      // An inttoptr cast is free so long as the input is a legal integer type
      // which doesn't contain values outside the range of a pointer.
      unsigned OpSize = OpTy->getScalarSizeInBits();
      if (DL->isLegalInteger(OpSize) &&
          OpSize <= DL->getPointerTypeSizeInBits(Ty))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::PtrToInt: {
      if (!DL)
        return TTI::TCC_Basic;

      // A ptrtoint cast is free so long as the result is a legal integer type
      // that is large enough to store the pointer.
      unsigned DestSize = Ty->getScalarSizeInBits();
      if (DL->isLegalInteger(DestSize) &&
          DestSize >= DL->getPointerTypeSizeInBits(OpTy))
        return TTI::TCC_Free;

      // Otherwise it's not a no-op.
      return TTI::TCC_Basic;
    }
    case Instruction::Trunc:
      // trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      if (DL && DL->isLegalInteger(DL->getTypeSizeInBits(Ty)))
        return TTI::TCC_Free;

      return TTI::TCC_Basic;
    }
  }
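
  /// For illustration only, a sketch of the intent of the default GEP model
  /// below (the IR is an invented example, not part of this interface): on a
  /// target whose addressing modes can fold constant offsets, a GEP such as
  /// \code
  ///   %p = getelementptr i32, i32* %base, i64 4
  ///   %v = load i32, i32* %p
  /// \endcode
  /// is expected to disappear into the addressing mode of its user, so a GEP
  /// whose indices are all constants is modeled as TCC_Free.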
  unsigned getGEPCost(const Value *Ptr, ArrayRef<const Value *> Operands) {
    // In the basic model, we just assume that all-constant GEPs will be folded
    // into their uses via addressing modes.
    for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
      if (!isa<Constant>(Operands[Idx]))
        return TTI::TCC_Basic;

    return TTI::TCC_Free;
  }

  unsigned getCallCost(FunctionType *FTy, int NumArgs) {
    assert(FTy && "FunctionType must be provided to this routine.");

    // The target-independent implementation just measures the size of the
    // function by approximating that each argument will take on average one
    // instruction to prepare.

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = FTy->getNumParams();

    return TTI::TCC_Basic * (NumArgs + 1);
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    switch (IID) {
    default:
      // Intrinsics rarely (if ever) have normal argument setup constraints.
      // Model them as having a basic instruction cost.
      // FIXME: This is wrong for libc intrinsics.
      return TTI::TCC_Basic;

    case Intrinsic::annotation:
    case Intrinsic::assume:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
    case Intrinsic::experimental_gc_result_int:
    case Intrinsic::experimental_gc_result_float:
    case Intrinsic::experimental_gc_result_ptr:
    case Intrinsic::experimental_gc_result:
    case Intrinsic::experimental_gc_relocate:
      // These intrinsics don't actually represent code after lowering.
      return TTI::TCC_Free;
    }
  }

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isLoweredToCall(const Function *F) {
    // FIXME: These should almost certainly not be handled here, and instead
    // handled with the help of TLI or the target itself. This was largely
    // ported from existing analysis heuristics here so that such refactorings
    // can take place in the future.

    if (F->isIntrinsic())
      return false;

    if (F->hasLocalLinkage() || !F->hasName())
      return true;

    StringRef Name = F->getName();

    // These will all likely lower to a single selection DAG node.
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
        Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
        Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
      return false;

    // These are all likely to be optimized into something smaller.
    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" ||
        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
        Name == "llabs")
      return false;

    return true;
  }

  void getUnrollingPreferences(Loop *, TTI::UnrollingPreferences &) {}

  bool isLegalAddImmediate(int64_t Imm) { return false; }

  bool isLegalICmpImmediate(int64_t Imm) { return false; }
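
  /// A hedged illustration of the conservative default below (the 'Ty' and
  /// 'GV' values are assumed to come from the caller; the accepted set of
  /// modes is a heuristic borrowed from LSR, not a statement about any real
  /// target):
  /// \code
  ///   // [%reg1 + %reg2]: no GlobalValue base, zero offset, scale <= 1.
  ///   isLegalAddressingMode(Ty, /*BaseGV=*/nullptr, /*BaseOffset=*/0,
  ///                         /*HasBaseReg=*/true, /*Scale=*/1);  // true
  ///   // [@GV + 4]: any GlobalValue base or nonzero offset is rejected.
  ///   isLegalAddressingMode(Ty, GV, /*BaseOffset=*/4,
  ///                         /*HasBaseReg=*/false, /*Scale=*/0); // false
  /// \endcode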
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale) {
    // Guess that reg+reg addressing is allowed. This heuristic is taken from
    // the implementation of LSR.
    return !BaseGV && BaseOffset == 0 && Scale <= 1;
  }

  bool isLegalMaskedStore(Type *DataType, int Consecutive) { return false; }

  bool isLegalMaskedLoad(Type *DataType, int Consecutive) { return false; }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale) {
    // Guess that all legal addressing modes are free.
    if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale))
      return 0;
    return -1;
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }

  bool isProfitableToHoist(Instruction *I) { return true; }

  bool isTypeLegal(Type *Ty) { return false; }

  unsigned getJumpBufAlignment() { return 0; }

  unsigned getJumpBufSize() { return 0; }

  bool shouldBuildLookupTables() { return true; }

  bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }

  TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
    return TTI::PSK_Software;
  }

  bool haveFastSqrt(Type *Ty) { return false; }

  unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) {
    return TTI::TCC_Free;
  }

  unsigned getNumberOfRegisters(bool Vector) { return 8; }

  unsigned getRegisterBitWidth(bool Vector) { return 32; }

  unsigned getMaxInterleaveFactor() { return 1; }

  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                  TTI::OperandValueKind Opd1Info,
                                  TTI::OperandValueKind Opd2Info,
                                  TTI::OperandValueProperties Opd1PropInfo,
                                  TTI::OperandValueProperties Opd2PropInfo) {
    return 1;
  }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
                          Type *SubTp) {
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) { return 1; }

  unsigned getCFInstrCost(unsigned Opcode) { return 1; }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    return 1;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) {
    return 1;
  }

  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) {
    return 1;
  }

  unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                 ArrayRef<Type *> Tys) {
    return 1;
  }

  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 1;
  }

  unsigned getNumberOfParts(Type *Tp) { return 0; }

  unsigned getAddressComputationCost(Type *Tp, bool) { return 0; }

  unsigned getReductionCost(unsigned, Type *, bool) { return 1; }

  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
    return false;
  }

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) {
    return nullptr;
  }
};
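
// For illustration only: a minimal sketch (with a hypothetical target name)
// of how the CRTP mix-in below is meant to be used. A concrete implementation
// derives from the CRTP base, passing itself as the template parameter, and
// shadows whichever defaults it wants to change:
//
//   class HypotheticalTTIImpl
//       : public TargetTransformInfoImplCRTPBase<HypotheticalTTIImpl> {
//     typedef TargetTransformInfoImplCRTPBase<HypotheticalTTIImpl> BaseT;
//
//   public:
//     explicit HypotheticalTTIImpl(const DataLayout *DL) : BaseT(DL) {}
//
//     // Shadow a base-class default. Calls routed through the CRTP base
//     // (e.g. getCallCost) reach this via the static_cast<T *>(this)
//     // dispatch below.
//     bool isLoweredToCall(const Function *F) { return true; }
//   };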
/// \brief CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
private:
  typedef TargetTransformInfoImplBase BaseT;

protected:
  explicit TargetTransformInfoImplCRTPBase(const DataLayout *DL) : BaseT(DL) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  TargetTransformInfoImplCRTPBase(const TargetTransformInfoImplCRTPBase &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)) {}
  TargetTransformInfoImplCRTPBase(TargetTransformInfoImplCRTPBase &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
  TargetTransformInfoImplCRTPBase &
  operator=(const TargetTransformInfoImplCRTPBase &RHS) {
    BaseT::operator=(static_cast<const BaseT &>(RHS));
    return *this;
  }
  TargetTransformInfoImplCRTPBase &
  operator=(TargetTransformInfoImplCRTPBase &&RHS) {
    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
    return *this;
  }

  using BaseT::getCallCost;

  unsigned getCallCost(const Function *F, int NumArgs) {
    assert(F && "A concrete function must be provided to this routine.");

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = F->arg_size();

    if (Intrinsic::ID IID = (Intrinsic::ID)F->getIntrinsicID()) {
      FunctionType *FTy = F->getFunctionType();
      SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
      return static_cast<T *>(this)
          ->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys);
    }

    if (!static_cast<T *>(this)->isLoweredToCall(F))
      return TTI::TCC_Basic; // Give a basic cost if it will be lowered
                             // directly.

    return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs);
  }

  unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments) {
    // Simply delegate to generic handling of the call.
    // FIXME: We should use instsimplify or something else to catch calls which
    // will constant fold with these arguments.
    return static_cast<T *>(this)->getCallCost(F, Arguments.size());
  }

  using BaseT::getIntrinsicCost;

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    // Delegate to the generic intrinsic handling code. This mostly provides an
    // opportunity for targets to (for example) special case the cost of
    // certain intrinsics based on constants used as arguments.
    SmallVector<Type *, 8> ParamTys;
    ParamTys.reserve(Arguments.size());
    for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
      ParamTys.push_back(Arguments[Idx]->getType());
    return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys);
  }
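
  /// A sketch, for illustration, of what the default user-cost model below
  /// returns for a few representative IR users (the IR is invented; actual
  /// costs come from the hooks above and may be shadowed by T):
  /// \code
  ///   %phi = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]   ; TCC_Free
  ///   %p   = getelementptr i32, i32* %base, i64 1 ; routed to getGEPCost
  ///   %z   = zext i1 %cmp to i32                  ; TCC_Free (cast of a cmp)
  ///   %s   = add i32 %x, %y                       ; getOperationCost -> Basic
  /// \endcode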
  unsigned getUserCost(const User *U) {
    if (isa<PHINode>(U))
      return TTI::TCC_Free; // Model all PHI nodes as free.

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
      SmallVector<const Value *, 4> Indices(GEP->idx_begin(), GEP->idx_end());
      return static_cast<T *>(this)
          ->getGEPCost(GEP->getPointerOperand(), Indices);
    }

    if (auto CS = ImmutableCallSite(U)) {
      const Function *F = CS.getCalledFunction();
      if (!F) {
        // Just use the called value type.
        Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
        return static_cast<T *>(this)
            ->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
      }

      SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
      return static_cast<T *>(this)->getCallCost(F, Arguments);
    }

    if (const CastInst *CI = dyn_cast<CastInst>(U)) {
      // The result of a cmp instruction is often extended (to be used by other
      // cmp instructions, logical or return instructions). These are usually
      // no-ops on most sane targets.
      if (isa<CmpInst>(CI->getOperand(0)))
        return TTI::TCC_Free;
    }

    return static_cast<T *>(this)->getOperationCost(
        Operator::getOpcode(U), U->getType(),
        U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
  }
};

} // end namespace llvm

#endif