//===- BasicTargetTransformInfo.cpp - Basic target-independent TTI impl ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the implementation of a basic TargetTransformInfo pass
/// predicated on the target abstractions present in the target independent
/// code generator. It uses these (primarily TargetLowering) to model as much
/// of the TTI query interface as possible. It is included by most targets so
/// that they can specialize only a small subset of the query space.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "basictti"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetLowering.h"
#include <utility>

using namespace llvm;

namespace {

class BasicTTI : public ImmutablePass, public TargetTransformInfo {
  const TargetMachine *TM;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

  const TargetLoweringBase *getTLI() const { return TM->getTargetLowering(); }

public:
  BasicTTI() : ImmutablePass(ID), TM(0) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  BasicTTI(const TargetMachine *TM) : ImmutablePass(ID), TM(TM) {
    initializeBasicTTIPass(*PassRegistry::getPassRegistry());
  }

  virtual void initializePass() {
    pushTTIStack(this);
  }

  virtual void finalizePass() {
    popTTIStack();
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  virtual void *getAdjustedAnalysisPointer(const void *ID) {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  virtual bool hasBranchDivergence() const;

  /// \name Scalar TTI Implementations
  /// @{

  virtual bool isLegalAddImmediate(int64_t imm) const;
  virtual bool isLegalICmpImmediate(int64_t imm) const;
  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const;
  virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale) const;
  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
  virtual bool isTypeLegal(Type *Ty) const;
  virtual unsigned getJumpBufAlignment() const;
  virtual unsigned getJumpBufSize() const;
  virtual bool shouldBuildLookupTables() const;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  virtual unsigned getNumberOfRegisters(bool Vector) const;
  virtual unsigned getMaximumUnrollFactor() const;
  virtual unsigned getRegisterBitWidth(bool Vector) const;
  virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                          OperandValueKind,
                                          OperandValueKind) const;
  virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                                  int Index, Type *SubTp) const;
  virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const;
  virtual unsigned getCFInstrCost(unsigned Opcode) const;
  virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const;
  virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const;
  virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const;
  virtual unsigned getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
                                         ArrayRef<Type*> Tys) const;
  virtual unsigned getNumberOfParts(Type *Tp) const;
  virtual unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const;

  /// @}
};

}

INITIALIZE_AG_PASS(BasicTTI, TargetTransformInfo, "basictti",
                   "Target independent code generator's TTI", true, true,
                   false)
char BasicTTI::ID = 0;

ImmutablePass *
llvm::createBasicTargetTransformInfoPass(const TargetMachine *TM) {
  return new BasicTTI(TM);
}
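
// Example (illustrative; the "MyTarget" names are hypothetical): a target
// typically instantiates this pass from TargetMachine::addAnalysisPasses,
// registering BasicTTI before its own TTI pass so that target-specific
// queries can fall back to this generic layer through the analysis-group
// stack:
//
//   void MyTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
//     // BasicTTI first; the target pass is pushed on top of it and
//     // delegates any query it does not specialize.
//     PM.add(createBasicTargetTransformInfoPass(this));
//     PM.add(createMyTargetTransformInfoPass(this));
//   }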

bool BasicTTI::hasBranchDivergence() const { return false; }

bool BasicTTI::isLegalAddImmediate(int64_t imm) const {
  return getTLI()->isLegalAddImmediate(imm);
}

bool BasicTTI::isLegalICmpImmediate(int64_t imm) const {
  return getTLI()->isLegalICmpImmediate(imm);
}

bool BasicTTI::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->isLegalAddressingMode(AM, Ty);
}

int BasicTTI::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->getScalingFactorCost(AM, Ty);
}

bool BasicTTI::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return getTLI()->isTruncateFree(Ty1, Ty2);
}

bool BasicTTI::isTypeLegal(Type *Ty) const {
  EVT T = getTLI()->getValueType(Ty);
  return getTLI()->isTypeLegal(T);
}

unsigned BasicTTI::getJumpBufAlignment() const {
  return getTLI()->getJumpBufAlignment();
}

unsigned BasicTTI::getJumpBufSize() const {
  return getTLI()->getJumpBufSize();
}

bool BasicTTI::shouldBuildLookupTables() const {
  const TargetLoweringBase *TLI = getTLI();
  return TLI->supportJumpTables() &&
      (TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
       TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
}

//===----------------------------------------------------------------------===//
//
// Calls used by the vectorizers.
//
//===----------------------------------------------------------------------===//

unsigned BasicTTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                            bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (unsigned i = 0, e = Ty->getVectorNumElements(); i != e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
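
// Worked example for the routine above: scalarizing a <4 x float> value with
// both Insert and Extract set queries the cost of four InsertElement and four
// ExtractElement operations; with this file's default getVectorInstrCost of 1
// per element access, the reported overhead is 8.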

unsigned BasicTTI::getNumberOfRegisters(bool Vector) const {
  return 1;
}

unsigned BasicTTI::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned BasicTTI::getMaximumUnrollFactor() const {
  return 1;
}

unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                          OperandValueKind,
                                          OperandValueKind) const {
  // Check if any of the operands are vector operands.
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
  // Assume that floating point arithmetic operations cost twice as much as
  // integer operations.
  unsigned OpCost = (IsFloat ? 2 : 1);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2 * OpCost;
    return LT.first * 1 * OpCost;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume that the code is twice
    // as expensive.
    return LT.first * 2 * OpCost;
  }

  // Else, assume that we need to scalarize this op.
  if (Ty->isVectorTy()) {
    unsigned Num = Ty->getVectorNumElements();
    unsigned Cost = TopTTI->getArithmeticInstrCost(Opcode, Ty->getScalarType());
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Ty, true, true) + Num * Cost;
  }

  // We don't know anything about this scalar instruction.
  return OpCost;
}
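
// Worked example: an integer 'add' on <8 x i32>, on a target whose widest
// legal vector type is v4i32, legalizes into two parts (LT.first == 2).  The
// operation itself is legal, so the split-type branch above yields
// 2 * 2 * 1 = 4.  The same add on a legal, unsplit type costs 1.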

unsigned BasicTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                  Type *SubTp) const {
  return 1;
}

unsigned BasicTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);

  // Check for NOOP conversions.
  if (SrcLT.first == DstLT.first &&
      SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

    // Bitcasts and truncations between types that are legalized to the same
    // type are free.
    if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
      return 0;
  }

  if (Opcode == Instruction::Trunc &&
      TLI->isTruncateFree(SrcLT.second, DstLT.second))
    return 0;

  if (Opcode == Instruction::ZExt &&
      TLI->isZExtFree(SrcLT.second, DstLT.second))
    return 0;

  // If the cast is marked as legal (or promote) then assume low cost.
  if (TLI->isOperationLegalOrPromote(ISD, DstLT.second))
    return 1;

  // Handle scalar conversions.
  if (!Src->isVectorTy() && !Dst->isVectorTy()) {

    // Scalar bitcasts are usually free.
    if (Opcode == Instruction::BitCast)
      return 0;

    // Just check the op cost. If the operation is legal then assume it
    // costs 1.
    if (!TLI->isOperationExpand(ISD, DstLT.second))
      return 1;

    // Assume that illegal scalar instructions are expensive.
    return 4;
  }

  // Check vector-to-vector casts.
  if (Dst->isVectorTy() && Src->isVectorTy()) {

    // If the cast is between same-sized registers, then the check is simple.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Assume that zext is done using AND.
      if (Opcode == Instruction::ZExt)
        return 1;

      // Assume that sext is done using SHL and SRA.
      if (Opcode == Instruction::SExt)
        return 2;

      // Just check the op cost. If the operation is legal then assume it costs
      // 1 and multiply by the type-legalization overhead.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return SrcLT.first * 1;
    }

    // If we are converting vectors and the operation is illegal, or
    // if the vectors are legalized to different types, estimate the
    // scalarization costs.
    unsigned Num = Dst->getVectorNumElements();
    unsigned Cost = TopTTI->getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Dst, true, true) + Num * Cost;
  }

  // We already handled vector-to-vector and scalar-to-scalar conversions. This
  // is where we handle bitcasts between vectors and scalars. We need to assume
  // that the conversion is scalarized in one way or another.
  if (Opcode == Instruction::BitCast)
    // Illegal bitcasts are done by storing and loading from a stack slot.
    return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true) : 0)
         + (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false) : 0);

  llvm_unreachable("Unhandled cast");
}
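
// Worked examples: a bitcast between <4 x i32> and <4 x float> legalizes to
// the same number of same-sized registers and is reported as free (cost 0).
// On a target where <4 x i16> is promoted to v4i32, a sext from <4 x i16> to
// <4 x i32> hits the same-size vector branch above and is costed as a SHL
// plus SRA pair, i.e. 2.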

unsigned BasicTTI::getCFInstrCost(unsigned Opcode) const {
  // Branches are assumed to be predicted.
  return 0;
}

unsigned BasicTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Selects on vectors are actually vector selects.
  if (ISD == ISD::SELECT) {
    assert(CondTy && "CondTy must exist");
    if (CondTy->isVectorTy())
      ISD = ISD::VSELECT;
  }

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1. Multiply
    // by the type-legalization overhead.
    return LT.first * 1;
  }

  // Otherwise, assume that the operation is scalarized.
  if (ValTy->isVectorTy()) {
    unsigned Num = ValTy->getVectorNumElements();
    if (CondTy)
      CondTy = CondTy->getScalarType();
    unsigned Cost = TopTTI->getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
                                               CondTy);

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
  }

  // Unknown scalar opcode.
  return 1;
}

unsigned BasicTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const {
  return 1;
}

unsigned BasicTTI::getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const {
  assert(!Src->isVoidTy() && "Invalid type");
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src);

  // Assume that all loads and stores of legal types cost 1 per legalized
  // part.
  return LT.first;
}
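
// Worked example: a load of <8 x i32> on a target whose widest legal vector
// register is 128 bits legalizes into two v4i32 loads, so LT.first == 2 and
// the reported cost is 2.  Note that Alignment and AddressSpace are accepted
// but not yet used to refine the estimate.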

unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                         ArrayRef<Type *> Tys) const {
  unsigned ISD = 0;
  switch (IID) {
  default: {
    // Assume that we need to scalarize this intrinsic.
    unsigned ScalarizationCost = 0;
    unsigned ScalarCalls = 1;
    if (RetTy->isVectorTy()) {
      ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
    }
    for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
      if (Tys[i]->isVectorTy()) {
        ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
        ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
      }
    }

    return ScalarCalls + ScalarizationCost;
  }
  // Look for intrinsics that can be lowered directly or turned into a scalar
  // intrinsic call.
  case Intrinsic::sqrt:      ISD = ISD::FSQRT;      break;
  case Intrinsic::sin:       ISD = ISD::FSIN;       break;
  case Intrinsic::cos:       ISD = ISD::FCOS;       break;
  case Intrinsic::exp:       ISD = ISD::FEXP;       break;
  case Intrinsic::exp2:      ISD = ISD::FEXP2;      break;
  case Intrinsic::log:       ISD = ISD::FLOG;       break;
  case Intrinsic::log10:     ISD = ISD::FLOG10;     break;
  case Intrinsic::log2:      ISD = ISD::FLOG2;      break;
  case Intrinsic::fabs:      ISD = ISD::FABS;       break;
  case Intrinsic::floor:     ISD = ISD::FFLOOR;     break;
  case Intrinsic::ceil:      ISD = ISD::FCEIL;      break;
  case Intrinsic::trunc:     ISD = ISD::FTRUNC;     break;
  case Intrinsic::nearbyint: ISD = ISD::FNEARBYINT; break;
  case Intrinsic::rint:      ISD = ISD::FRINT;      break;
  case Intrinsic::pow:       ISD = ISD::FPOW;       break;
  case Intrinsic::fma:       ISD = ISD::FMA;        break;
  case Intrinsic::fmuladd:   ISD = ISD::FMA;        break; // FIXME: mul + add?
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    return 0;
  }

  const TargetLoweringBase *TLI = getTLI();
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2;
    return LT.first * 1;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume that the code is twice
    // as expensive.
    return LT.first * 2;
  }

  // Else, assume that we need to scalarize this intrinsic. For math builtins
  // this will emit a costly libcall, adding call overhead and spills. Make it
  // very expensive.
  if (RetTy->isVectorTy()) {
    unsigned Num = RetTy->getVectorNumElements();
    unsigned Cost = TopTTI->getIntrinsicInstrCost(IID, RetTy->getScalarType(),
                                                  Tys);
    return 10 * Cost * Num;
  }

  // This is going to be turned into a library call, make it expensive.
  return 10;
}

unsigned BasicTTI::getNumberOfParts(Type *Tp) const {
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Tp);
  return LT.first;
}

unsigned BasicTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  return 0;
}
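
// Example (illustrative; "MyClientPass" is hypothetical): clients query the
// TargetTransformInfo analysis group rather than BasicTTI directly, so they
// transparently get the most specific implementation the target registered.
// The client must declare AU.addRequired<TargetTransformInfo>() in its
// getAnalysisUsage:
//
//   bool MyClientPass::runOnFunction(Function &F) {
//     const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
//     Type *Int32Ty = Type::getInt32Ty(F.getContext());
//     unsigned AddCost =
//         TTI.getArithmeticInstrCost(Instruction::Add, Int32Ty);
//     ...
//   }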