//===-- ARMTargetTransformInfo.cpp - ARM specific TTI ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ARMTargetTransformInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "armtti"

int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Bits > 32)
    return 4;

  int32_t SImmVal = Imm.getSExtValue();
  uint32_t ZImmVal = Imm.getZExtValue();
  if (!ST->isThumb()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getSOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  if (ST->isThumb2()) {
    if ((SImmVal >= 0 && SImmVal < 65536) ||
        (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
        (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
      return 1;
    return ST->hasV6T2Ops() ? 2 : 3;
  }
  // Thumb1.
  if (SImmVal >= 0 && SImmVal < 256)
    return 1;
  if ((~ZImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
    return 2;
  // Load from constantpool.
  return 3;
}
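
// For example, with the costs above: on a non-Thumb, v6T2-capable core,
// 0xFFFF satisfies the SImmVal < 65536 check and costs 1 (a single movw),
// while a constant such as 0x12345678 matches none of the patterns and is
// estimated at 2 (e.g. a movw/movt pair).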

int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Single to/from double precision conversions.
  static const CostTblEntry NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND,  MVT::v2f64, 2 },
    { ISD::FP_EXTEND, MVT::v2f32, 2 },
    { ISD::FP_EXTEND, MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
                                             ISD == ISD::FP_EXTEND)) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },

    { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }
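
  // For example, a cast such as "sext <8 x i8> to <8 x i32>" maps to the
  // {SIGN_EXTEND, v8i32, v8i8, 3} entry above: roughly one vmovl.s8
  // followed by two vmovl.s16 steps to widen both halves.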

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
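
// For example, a scalar fptosi from f32 to i64 hits the
// {FP_TO_SINT, i64, f32, 10} row above; the high cost reflects the long
// expansion (or runtime library call) such a conversion typically needs.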

int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
                                   unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
  if (ST->isSwift() &&
      Opcode == Instruction::InsertElement &&
      ValTy->isVectorTy() &&
      ValTy->getScalarSizeInBits() <= 32)
    return 3;

  if ((Opcode == Instruction::InsertElement ||
       Opcode == Instruction::ExtractElement)) {
    // Cross-class copies are expensive on many microarchitectures,
    // so assume they are expensive by default.
    if (ValTy->getVectorElementType()->isIntegerTy())
      return 3;

    // Even if it's not a cross-class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
    if (ValTy->isVectorTy() &&
        ValTy->getScalarSizeInBits() <= 32)
      return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2);
  }

  return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
}

int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                   Type *CondTy) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // On NEON a vector select gets lowered to vbsl.
  if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // Lowering of some vector selects is currently far from perfect.
    static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
    return LT.first;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int ARMTTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int ARMTTIImpl::getFPOpCost(Type *Ty) {
  // Use similar logic to that in ARMISelLowering:
  // Any ARM CPU with VFP2 has floating point, but Thumb1 didn't have access
  // to VFP.

  if (ST->hasVFP2() && !ST->isThumb1Only()) {
    if (Ty->isFloatTy()) {
      return TargetTransformInfo::TCC_Basic;
    }

    if (Ty->isDoubleTy()) {
      return ST->isFPOnlySP() ? TargetTransformInfo::TCC_Expensive :
        TargetTransformInfo::TCC_Basic;
    }
  }

  return TargetTransformInfo::TCC_Expensive;
}
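
// For example, on a subtarget where isFPOnlySP() returns true (a
// single-precision-only FPU, as on some Cortex-M cores), double-precision
// operations are reported as TCC_Expensive because they must be expanded
// or lowered to library calls.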

int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only handle costs of reverse and alternate shuffles for now.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    static const CostTblEntry NEONShuffleTbl[] = {
        // Reverse shuffle costs one instruction if we are shuffling within a
        // double word (vrev) or two if we shuffle a quad word (vrev, vext).
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    if (const auto *Entry = CostTableLookup(NEONShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  if (Kind == TTI::SK_Alternate) {
    static const CostTblEntry NEONAltShuffleTbl[] = {
        // Alt shuffle cost table for ARM. Cost is the number of instructions
        // required to create the shuffled vector.

        {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
        {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

        {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
        {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

        {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

        {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(NEONAltShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }
  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
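
// For example, reversing a <4 x i32> vector is a quad-word shuffle and is
// priced at 2 by the table above (roughly a vrev64.32 followed by a vext,
// per the comment on NEONShuffleTbl).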

int ARMTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {

  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  const unsigned FunctionCallDivCost = 20;
  const unsigned ReciprocalDivCost = 10;
  static const CostTblEntry CostTbl[] = {
    // Division.
    // These costs are somewhat random. Choose a cost of 20 to indicate that
    // vectorizing division (added function call) is going to be very
    // expensive.
    // Double register types.
    { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::UDIV, MVT::v4i16, ReciprocalDivCost},
    { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::UDIV, MVT::v8i8, ReciprocalDivCost},
    { ISD::SREM, MVT::v8i8, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i8, 8 * FunctionCallDivCost},
    // Quad register types.
    { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
    { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
    { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
    // Multiplication.
  };

  if (ST->hasNEON())
    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);

  // This is somewhat of a hack. The problem that we are facing is that SROA
  // creates a sequence of shift, and, or instructions to construct values.
  // These sequences are recognized by ISel and have zero cost. Not so for
  // the vectorized code. Because we have support for v2i64 but not i64 those
  // sequences look particularly beneficial to vectorize.
  // To work around this we increase the cost of v2i64 operations to make them
  // seem less beneficial.
  if (LT.second == MVT::v2i64 &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue)
    Cost += 4;

  return Cost;
}

int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst1/vld1 vs. 1 uop for vldr/vstr.
    return LT.first * 4;
  }
  return LT.first;
}

int ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN don't support vector types with i64/f64 elements.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits) {
    unsigned NumElts = VecTy->getVectorNumElements();
    Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
    unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy);

    // vldN/vstN only support legal vector types that are 64 or 128 bits in
    // size.
    if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
      return Factor;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}
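
// For example, a factor-2 interleaved load of <8 x i16> deinterleaves into
// two 64-bit <4 x i16> sub-vectors, so the check above succeeds and the
// returned cost is the factor itself, 2; 64-bit-element or otherwise
// unsupported cases fall back to the much higher scalarized base cost.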