//===-- ARMTargetTransformInfo.cpp - ARM specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// ARM target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "armtti"
#include "ARM.h"
#include "ARMTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/CostTable.h"
using namespace llvm;

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeARMTTIPass(PassRegistry &);
}

namespace {

class ARMTTI : public ImmutablePass, public TargetTransformInfo {
  const ARMBaseTargetMachine *TM;
  const ARMSubtarget *ST;
  const ARMTargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  ARMTTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  ARMTTI(const ARMBaseTargetMachine *TM)
      : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
        TLI(TM->getTargetLowering()) {
    initializeARMTTIPass(*PassRegistry::getPassRegistry());
  }

  virtual void initializePass() {
    pushTTIStack(this);
  }

  virtual void finalizePass() {
    popTTIStack();
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  virtual void *getAdjustedAnalysisPointer(const void *ID) {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{

  virtual unsigned getIntImmCost(const APInt &Imm, Type *Ty) const;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const {
    if (Vector) {
      if (ST->hasNEON())
        return 16;
      return 0;
    }

    if (ST->isThumb1Only())
      return 8;
    return 16;
  }

  unsigned getRegisterBitWidth(bool Vector) const {
    if (Vector) {
      if (ST->hasNEON())
        return 128;
      return 0;
    }

    return 32;
  }

  unsigned getMaximumUnrollFactor() const {
    // These are out-of-order CPUs:
    if (ST->isCortexA15() || ST->isSwift())
      return 2;
    return 1;
  }

  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const;

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const;

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const;

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) const;

  unsigned getAddressComputationCost(Type *Val, bool IsComplex) const;

  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                  OperandValueKind Op1Info = OK_AnyValue,
                                  OperandValueKind Op2Info = OK_AnyValue) const;
  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(ARMTTI, TargetTransformInfo, "armtti",
                   "ARM Target Transform Info", true, true, false)
char ARMTTI::ID = 0;

ImmutablePass *
llvm::createARMTargetTransformInfoPass(const ARMBaseTargetMachine *TM) {
  return new ARMTTI(TM);
}


unsigned ARMTTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Bits > 32)
    return 4;

  int32_t SImmVal = Imm.getSExtValue();
  uint32_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  } else if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  } else /*Thumb1*/ {
    if (SImmVal >= 0 && SImmVal < 256)
      return 1;
    if ((~ZImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
      return 2;
    // Load from constant pool.
    return 3;
  }
  return 2;
}

unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                  Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry<MVT> NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND,  MVT::v2f64, 2 },
    { ISD::FP_EXTEND, MVT::v2f32, 2 },
    { ISD::FP_EXTEND, MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
                                             ISD == ISD::FP_EXTEND)) {
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
    int Idx = CostTableLookup<MVT>(NEONFltDblTbl, array_lengthof(NEONFltDblTbl),
                                   ISD, LT.second);
    if (Idx != -1)
      return LT.first * NEONFltDblTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry<MVT> NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  0 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  0 },
    { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i32,  1 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  0 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,   7 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },

    { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i8,   3 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  4 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  4 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  3 },
    { ISD::FP_TO_UINT,  MVT::v4i8,   MVT::v4f32,  3 },
    { ISD::FP_TO_SINT,  MVT::v4i16,  MVT::v4f32,  2 },
    { ISD::FP_TO_UINT,  MVT::v4i16,  MVT::v4f32,  2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  2 },

    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i8,   4 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i8,   4 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i16,  3 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  2 },

    { ISD::FP_TO_SINT,  MVT::v2i32,  MVT::v2f64,  2 },
    { ISD::FP_TO_UINT,  MVT::v2i32,  MVT::v2f64,  2 },
    { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v8f32,  4 },
    { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f32,  4 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    int Idx = ConvertCostTableLookup<MVT>(NEONVectorConversionTbl,
                                array_lengthof(NEONVectorConversionTbl),
                                ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return NEONVectorConversionTbl[Idx].Cost;
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry<MVT> NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT, MVT::i1,  MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i1,  MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i1,  MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i1,  MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i8,  MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i8,  MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i8,  MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i8,  MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    int Idx = ConvertCostTableLookup<MVT>(NEONFloatConversionTbl,
                                        array_lengthof(NEONFloatConversionTbl),
                                          ISD, DstTy.getSimpleVT(),
                                          SrcTy.getSimpleVT());
    if (Idx != -1)
      return NEONFloatConversionTbl[Idx].Cost;
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry<MVT> NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::f32, MVT::i1,  2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i1,  2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i1,  2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i1,  2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i8,  2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i8,  2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i8,  2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i8,  2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    int Idx = ConvertCostTableLookup<MVT>(NEONIntegerConversionTbl,
                                       array_lengthof(NEONIntegerConversionTbl),
                                          ISD, DstTy.getSimpleVT(),
                                          SrcTy.getSimpleVT());
    if (Idx != -1)
      return NEONIntegerConversionTbl[Idx].Cost;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry<MVT> ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    int Idx =
      ConvertCostTableLookup<MVT>(ARMIntegerConversionTbl,
                                  array_lengthof(ARMIntegerConversionTbl),
                                  ISD, DstTy.getSimpleVT(),
                                  SrcTy.getSimpleVT());
    if (Idx != -1)
      return ARMIntegerConversionTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}

unsigned ARMTTI::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                    unsigned Index) const {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
  if (ST->isSwift() &&
      Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() &&
      ValTy->getScalarSizeInBits() <= 32)
    return 3;

  return TargetTransformInfo::getVectorInstrCost(Opcode, ValTy, Index);
}

unsigned ARMTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry<MVT> NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 2*16 + 1 + 3*1 + 4*1 },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i32,  4*8 + 1*3 + 1*4 + 1*2 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 4*16 + 1*6 + 1*8 + 1*4 },
      { ISD::SELECT, MVT::v4i1,  MVT::v4i64,  4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i64,  50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(CondTy);
    EVT SelValTy = TLI->getValueType(ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      int Idx = ConvertCostTableLookup<MVT>(NEONVectorSelectTbl,
                                            array_lengthof(NEONVectorSelectTbl),
                                            ISD, SelCondTy.getSimpleVT(),
                                            SelValTy.getSimpleVT());
      if (Idx != -1)
        return NEONVectorSelectTbl[Idx].Cost;
    }

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);
    return LT.first;
  }

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned ARMTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

unsigned ARMTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only handle costs of reverse shuffles for now.
  if (Kind != SK_Reverse)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  static const CostTblEntry<MVT> NEONShuffleTbl[] = {
    // Reverse shuffle costs one instruction if we are shuffling within a
    // double word (vrev) or two if we shuffle a quad word (vrev, vext).
    { ISD::VECTOR_SHUFFLE, MVT::v2i32, 1 },
    { ISD::VECTOR_SHUFFLE, MVT::v2f32, 1 },
    { ISD::VECTOR_SHUFFLE, MVT::v2i64, 1 },
    { ISD::VECTOR_SHUFFLE, MVT::v2f64, 1 },

    { ISD::VECTOR_SHUFFLE, MVT::v4i32, 2 },
    { ISD::VECTOR_SHUFFLE, MVT::v4f32, 2 },
    { ISD::VECTOR_SHUFFLE, MVT::v8i16, 2 },
    { ISD::VECTOR_SHUFFLE, MVT::v16i8, 2 }
  };

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);

  int Idx = CostTableLookup<MVT>(NEONShuffleTbl, array_lengthof(NEONShuffleTbl),
                                 ISD::VECTOR_SHUFFLE, LT.second);
  if (Idx == -1)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  return LT.first * NEONShuffleTbl[Idx].Cost;
}

unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                        OperandValueKind Op1Info,
                                        OperandValueKind Op2Info) const {

  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
  static const CostTblEntry<MVT> CostTbl[] = {
    // Division.
    // These costs are somewhat random. Choose a cost of 20 to indicate that
    // vectorizing division (added function call) is going to be very expensive.
    // Double register types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
    // Quad register types.
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  int Idx = -1;

  if (ST->hasNEON())
    Idx = CostTableLookup<MVT>(CostTbl, array_lengthof(CostTbl), ISDOpcode,
                               LT.second);

  if (Idx != -1)
    return LT.first * CostTbl[Idx].Cost;

  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
                                                     Op2Info);
}
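
//===----------------------------------------------------------------------===//
// Illustrative usage sketch (not part of the upstream file). The ARMTTI pass
// defined above is only reachable by optimizations once the target machine
// registers it with the pass manager; clients then query it through the
// TargetTransformInfo analysis group rather than naming ARMTTI directly, which
// is what getAdjustedAnalysisPointer above supports. A minimal sketch,
// assuming the pass-manager hooks of this era of LLVM; the exact code in
// ARMTargetMachine.cpp may differ:
//
//   void ARMBaseTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
//     // Register the ARM-specific TTI implementation created in this file.
//     PM.add(createARMTargetTransformInfoPass(this));
//   }
//
// A transform pass that declares TargetTransformInfo as a required analysis
// could then consult the cost interfaces implemented above, for example:
//
//   const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
//   unsigned Cost = TTI.getCastInstrCost(Instruction::SExt, DstVecTy, SrcVecTy);
//
// DstVecTy and SrcVecTy are hypothetical IR vector types; with NEON enabled,
// such a query would be answered from NEONVectorConversionTbl above.
//===----------------------------------------------------------------------===//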