//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes how to lower LLVM code to machine code.  This has three
// main components:
//
//  1. Which ValueTypes are natively supported by the target.
//  2. Which operations are supported for supported ValueTypes.
//  3. Cost thresholds for alternative implementations of certain operations.
//
// In addition it has a few other components, like information about FP
// immediates.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H

#include "llvm/CallingConv.h"
#include "llvm/InlineAsm.h"
#include "llvm/Attributes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/Support/DebugLoc.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
#include <map>
#include <vector>

namespace llvm {
  class CallInst;
  class CCState;
  class FastISel;
  class FunctionLoweringInfo;
  class ImmutableCallSite;
  class IntrinsicInst;
  class MachineBasicBlock;
  class MachineFunction;
  class MachineInstr;
  class MachineJumpTableInfo;
  class MCContext;
  class MCExpr;
  template<typename T> class SmallVectorImpl;
  class TargetData;
  class TargetRegisterClass;
  class TargetLoweringObjectFile;
  class Value;

  namespace Sched {
    enum Preference {
      None,             // No preference
      Source,           // Follow source order.
      RegPressure,      // Scheduling for lowest register pressure.
      Hybrid,           // Scheduling for both latency and register pressure.
      ILP,              // Scheduling for ILP in low register pressure mode.
      VLIW              // Scheduling for VLIW targets.
    };
  }


//===----------------------------------------------------------------------===//
/// TargetLowering - This class defines information used to lower LLVM code to
/// legal SelectionDAG operators that the target instruction selector can
/// accept natively.
///
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
///
class TargetLowering {
  TargetLowering(const TargetLowering&);  // DO NOT IMPLEMENT
  void operator=(const TargetLowering&);  // DO NOT IMPLEMENT
public:
  /// LegalizeAction - This enum indicates whether operations are valid for a
  /// target, and if not, what action should be used to make them valid.
  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };
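  // Illustrative sketch (not part of the interface): how a hypothetical
  // 32-bit target without a hardware divider might classify a few operations
  // using setOperationAction, declared later in this file. The opcode/type
  // choices here are assumptions made for illustration:
  //
  //   setOperationAction(ISD::ADD,  MVT::i32, Legal);    // one instruction
  //   setOperationAction(ISD::SDIV, MVT::i32, Expand);   // ops or libcall
  //   setOperationAction(ISD::MUL,  MVT::i8,  Promote);  // do it in a
  //                                                      // larger type
  //   setOperationAction(ISD::BR_JT, MVT::Other, Custom); // LowerOperation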
  /// LegalizeTypeAction - This enum indicates whether a type is legal for a
  /// target, and if not, what action should be used to make it valid.
  enum LegalizeTypeAction {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector      // This vector should be widened into a larger vector.
  };

  enum BooleanContent { // How the target represents true/false values.
    UndefinedBooleanContent,    // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }
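  // Illustrative sketch (caller-side, an assumption): widening an i1 boolean
  // in a way that respects the target's boolean representation. DAG, dl, N0
  // and N1 are assumed to be in scope:
  //
  //   SDValue Cond = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i32),
  //                               N0, N1, ISD::SETLT);
  //   ISD::NodeType ExtOp =
  //     TargetLowering::getExtendForContent(TLI.getBooleanContents(false));
  //   SDValue Wide = DAG.getNode(ExtOp, dl, MVT::i32, Cond);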
  /// NOTE: The constructor takes ownership of TLOF.
  explicit TargetLowering(const TargetMachine &TM,
                          const TargetLoweringObjectFile *TLOF);
  virtual ~TargetLowering();

  const TargetMachine &getTargetMachine() const { return TM; }
  const TargetData *getTargetData() const { return TD; }
  const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }

  bool isBigEndian() const { return !IsLittleEndian; }
  bool isLittleEndian() const { return IsLittleEndian; }
  MVT getPointerTy() const { return PointerTy; }
  virtual MVT getShiftAmountTy(EVT LHSTy) const;

  /// isSelectExpensive - Return true if the select operation is expensive for
  /// this target.
  bool isSelectExpensive() const { return SelectIsExpensive; }

  /// isIntDivCheap() - Return true if integer divide is usually cheaper than
  /// a sequence of several shifts, adds, and multiplies for this target.
  bool isIntDivCheap() const { return IntDivIsCheap; }

  /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
  /// srl/add/sra.
  bool isPow2DivCheap() const { return Pow2DivIsCheap; }

  /// isJumpExpensive() - Return true if flow control is an expensive operation
  /// that should be avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// getSetCCResultType - Return the ValueType of the result of SETCC
  /// operations.  Also used to obtain the target's preferred type for
  /// the condition operand of SELECT and BRCOND nodes.  In the case of
  /// BRCOND the argument passed is MVT::Other since there are no other
  /// operands to get a type hint from.
  virtual EVT getSetCCResultType(EVT VT) const;

  /// getCmpLibcallReturnType - Return the ValueType for comparison
  /// libcalls. Comparison libcalls include floating point comparison calls,
  /// and Ordered/Unordered check calls on floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// getBooleanContents - For targets without i1 registers, this gives the
  /// nature of the high-bits of boolean values held in types wider than i1.
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1.
  /// Some CPUs distinguish between vectors of boolean and scalars; the isVec
  /// parameter selects between the two kinds.  For example on X86 a scalar
  /// boolean should be zero extended from i1, while the elements of a vector
  /// of booleans should be sign extended from i1.
  BooleanContent getBooleanContents(bool isVec) const {
    return isVec ? BooleanVectorContents : BooleanContents;
  }

  /// getSchedulingPreference - Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// getSchedulingPreference - Some schedulers, e.g. hybrid, can switch to
  /// different scheduling heuristics for different nodes. This function
  /// returns the preference (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// getRegClassFor - Return the register class that should be used for the
  /// specified value type.
  virtual const TargetRegisterClass *getRegClassFor(EVT VT) const {
    assert(VT.isSimple() && "getRegClassFor called on illegal type!");
    const TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// getRepRegClassFor - Return the 'representative' register class for the
  /// specified value type. The 'representative' register class is the largest
  /// legal super-reg register class for the register class of the value type.
  /// For example, on i386 the rep register class for i8, i16, and i32 is GR32;
  /// on x86_64 it is GR64.
  virtual const TargetRegisterClass *getRepRegClassFor(EVT VT) const {
    assert(VT.isSimple() && "getRepRegClassFor called on illegal type!");
    const TargetRegisterClass *RC = RepRegClassForVT[VT.getSimpleVT().SimpleTy];
    return RC;
  }

  /// getRepRegClassCostFor - Return the cost of the 'representative' register
  /// class for the specified value type.
  virtual uint8_t getRepRegClassCostFor(EVT VT) const {
    assert(VT.isSimple() && "getRepRegClassCostFor called on illegal type!");
    return RepRegClassCostForVT[VT.getSimpleVT().SimpleTy];
  }

  /// isTypeLegal - Return true if the target has native support for the
  /// specified value type.  This means that it has a register that directly
  /// holds it without promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
  }
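  // Illustrative sketch (caller-side, an assumption): getRegClassFor asserts
  // on illegal types, so queries are normally guarded by isTypeLegal:
  //
  //   EVT VT = Op.getValueType();
  //   if (TLI.isTypeLegal(VT)) {
  //     const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
  //     // ... e.g. create a virtual register of class RC
  //   }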
  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];

  public:
    ValueTypeActionImpl() {
      std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(EVT VT, LegalizeTypeAction Action) {
      unsigned I = VT.getSimpleVT().SimpleTy;
      ValueTypeActions[I] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// getTypeAction - Return how we should legalize values of this type, either
  /// it is already legal (return 'Legal') or we need to promote it to a larger
  /// type (return 'Promote'), or we need to expand it into multiple registers
  /// of smaller integer type (return 'Expand').  'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// getTypeToTransformTo - For types supported by the target, this is an
  /// identity function.  For types that must be promoted to larger types, this
  /// returns the larger type to promote to.  For integer types that are larger
  /// than the largest integer register, this contains one step in the
  /// expansion to get to the smaller register.  For illegal floating point
  /// types, this returns the integer type to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// getTypeToExpandTo - For types supported by the target, this is an
  /// identity function.  For types that must be expanded (i.e. integer types
  /// that are larger than the largest integer register or illegal floating
  /// point types), this returns the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }

  /// getVectorTypeBreakdown - Vector types are broken down into some number of
  /// legal first class types.  For example, EVT::v8f32 maps to 2 EVT::v4f32
  /// with Altivec or SSE1, or 8 promoted EVT::f64 values with the X86 FP
  /// stack.  Similarly, EVT::v2i64 turns into 4 EVT::i32 values with both PPC
  /// and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register.  It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  ///
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  EVT &RegisterVT) const;
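  // Illustrative sketch (caller-side, an assumption) matching the v8f32
  // example in the comment above, on a target whose widest legal vector
  // type is v4f32 (e.g. SSE1):
  //
  //   EVT IntermediateVT, RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs =
  //     TLI.getVectorTypeBreakdown(Ctx, MVT::v8f32, IntermediateVT,
  //                                NumIntermediates, RegisterVT);
  //   // Expected: NumRegs == 2, IntermediateVT == RegisterVT == MVT::v4f32.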
  /// getTgtMemIntrinsic: Given an intrinsic, checks if on the target the
  /// intrinsic will need to map to a MemIntrinsicNode (touches memory). If
  /// this is the case, it returns true and stores the intrinsic
  /// information into the IntrinsicInfo that was passed to the function.
  struct IntrinsicInfo {
    unsigned     opc;         // target opcode
    EVT          memVT;       // memory VT
    const Value* ptrVal;      // value representing memory location
    int          offset;      // offset off of ptrVal
    unsigned     align;       // alignment
    bool         vol;         // is volatile?
    bool         readMem;     // reads memory?
    bool         writeMem;    // writes memory?
  };

  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// isFPImmLegal - Returns true if the target can instruction select the
  /// specified FP immediate natively. If false, the legalizer will materialize
  /// the FP immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
    return false;
  }

  /// isShuffleMaskLegal - Targets can use this to indicate that they only
  /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
  /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
  /// are assumed to be legal.
  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                  EVT /*VT*/) const {
    return true;
  }

  /// canOpTrap - Returns true if the operation can trap for the value type.
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
  /// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
  /// to replace a VAND with a constant pool entry.
  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// getOperationAction - Return how this operation should be treated: either
  /// it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)OpActions[I][Op];
  }

  /// isOperationLegalOrCustom - Return true if the specified operation is
  /// legal on this target or can be made legal with custom lowering. This
  /// is used to help guide high-level lowering decisions.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }

  /// isOperationLegal - Return true if the specified operation is legal on
  /// this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }
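  // Illustrative sketch (caller-side, an assumption): a DAG combine checks
  // isOperationLegalOrCustom before introducing a node, so that it never
  // creates an operation the legalizer would have to re-expand:
  //
  //   if (TLI.isOperationLegalOrCustom(ISD::MULHS, MVT::i32)) {
  //     // safe to build a multiply-high node, e.g. when turning a signed
  //     // divide by constant into a multiply
  //   }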
  /// getLoadExtAction - Return how this load with extension should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT VT) const {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE &&
           VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)LoadExtActions[VT.getSimpleVT().SimpleTy][ExtType];
  }

  /// isLoadExtLegal - Return true if the specified load with extension is
  /// legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT VT) const {
    return VT.isSimple() && getLoadExtAction(ExtType, VT) == Legal;
  }

  /// getTruncStoreAction - Return how this store with truncation should be
  /// treated: either it is legal, needs to be promoted to a larger size, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    assert(ValVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           MemVT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)TruncStoreActions[ValVT.getSimpleVT().SimpleTy]
                                            [MemVT.getSimpleVT().SimpleTy];
  }

  /// isTruncStoreLegal - Return true if the specified store with truncation is
  /// legal on this target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && MemVT.isSimple() &&
           getTruncStoreAction(ValVT, MemVT) == Legal;
  }

  /// getIndexedLoadAction - Return how the indexed load should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, EVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE &&
           VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
  }

  /// isIndexedLoadLegal - Return true if the specified indexed load is legal
  /// on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT) == Legal ||
       getIndexedLoadAction(IdxMode, VT) == Custom);
  }

  /// getIndexedStoreAction - Return how the indexed store should be treated:
  /// either it is legal, needs to be promoted to a larger size, needs to be
  /// expanded to some other code sequence, or the target has a custom expander
  /// for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, EVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE &&
           VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
  }

  /// isIndexedStoreLegal - Return true if the specified indexed store is legal
  /// on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT) == Legal ||
       getIndexedStoreAction(IdxMode, VT) == Custom);
  }
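  // Note (restating the encoding used above): each IndexedModeActions entry
  // packs two actions into one byte, the indexed-load action in the high
  // nibble and the indexed-store action in the low nibble. An illustrative
  // query (an assumption) from a combine that wants a pre-increment load:
  //
  //   if (TLI.isIndexedLoadLegal(ISD::PRE_INC, MVT::i32)) {
  //     // fold the address increment into the load
  //   }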
  /// getCondCodeAction - Return how the condition code should be treated:
  /// either it is legal, needs to be expanded to some other code sequence,
  /// or the target has a custom expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, EVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           (unsigned)VT.getSimpleVT().SimpleTy < sizeof(CondCodeActions[0])*4 &&
           "Table isn't big enough!");
    LegalizeAction Action = (LegalizeAction)
      ((CondCodeActions[CC] >> (2*VT.getSimpleVT().SimpleTy)) & 3);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// isCondCodeLegal - Return true if the specified condition code is legal
  /// on this target.
  bool isCondCodeLegal(ISD::CondCode CC, EVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }


  /// getTypeToPromoteTo - If the action for this operation is to promote, this
  /// method returns the ValueType to promote to.
  EVT getTypeToPromoteTo(unsigned Op, EVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.getSimpleVT().SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    EVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
              getOperationAction(Op, NVT) == Promote);
    return NVT;
  }

  /// getValueType - Return the EVT corresponding to this LLVM type.
  /// This is fixed by the LLVM operations except for the pointer size.  If
  /// AllowUnknown is true, this will return MVT::Other for types with no EVT
  /// counterpart (e.g. structs), otherwise it will assert.
  EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (Ty->isPointerTy()) return PointerTy;

    if (Ty->isVectorTy()) {
      VectorType *VTy = cast<VectorType>(Ty);
      Type *Elm = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (Elm->isPointerTy())
        Elm = EVT(PointerTy).getTypeForEVT(Ty->getContext());
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                              VTy->getNumElements());
    }
    return EVT::getEVT(Ty, AllowUnknown);
  }
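  // Illustrative sketch (caller-side, an assumption): mapping IR types to
  // EVTs. On a target with 32-bit pointers:
  //
  //   EVT PtrVT = TLI.getValueType(Type::getInt8PtrTy(Ctx)); // MVT::i32
  //   EVT VecVT = TLI.getValueType(
  //       VectorType::get(Type::getFloatTy(Ctx), 4));        // MVT::v4f32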
  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
  /// function arguments in the caller parameter area.  This is the actual
  /// alignment, not its logarithm.
  virtual unsigned getByValTypeAlignment(Type *Ty) const;

  /// getRegisterType - Return the type of registers that this ValueType will
  /// eventually require.
  EVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// getRegisterType - Return the type of registers that this ValueType will
  /// eventually require.
  EVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1, RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// getNumRegisters - Return the number of registers that this ValueType will
  /// eventually require.  This is one for any types promoted to live in larger
  /// registers, but may be more than one for types (like i64) that are split
  /// into pieces.  For types like i140, which are first promoted then
  /// expanded, it is the number of registers needed to hold all the bits of
  /// the original type.  For an i140 on a 32 bit machine this means 5
  /// registers.
  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1, VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }
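  // Worked example (following the integer path in getNumRegisters above):
  // for i140 on a 32-bit machine, BitWidth == 140 and RegWidth == 32, so
  // getNumRegisters returns (140 + 32 - 1) / 32 == 5, matching the comment.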
  /// ShouldShrinkFPConstant - If true, then instruction selection should
  /// seek to shrink the FP constant of the specified type to a smaller type
  /// in order to save space and / or reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  /// hasTargetDAGCombine - If true, the target has custom DAG combine
  /// transformations that it can perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  /// @brief Get maximum # of store operations permitted for llvm.memset
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? maxStoresPerMemsetOptSize : maxStoresPerMemset;
  }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  /// @brief Get maximum # of store operations permitted for llvm.memcpy
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? maxStoresPerMemcpyOptSize : maxStoresPerMemcpy;
  }

  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  /// @brief Get maximum # of store operations permitted for llvm.memmove
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? maxStoresPerMemmoveOptSize : maxStoresPerMemmove;
  }

  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type. This is used, for example, in situations where an
  /// array copy/move/set is converted to a sequence of store operations. Its
  /// use helps to ensure that such replacements don't generate code that
  /// causes an alignment error (trap) on the target machine.
  /// @brief Determine if the target supports unaligned memory accesses.
  virtual bool allowsUnalignedMemoryAccesses(EVT) const {
    return false;
  }

  /// This function returns true if the target would benefit from code
  /// placement optimization.
  /// @brief Determine if the target should perform code placement optimization.
  bool shouldOptimizeCodePlacement() const {
    return benefitFromCodePlacementOpt;
  }

  /// getOptimalMemOpType - Returns the target specific optimal type for load
  /// and store operations as a result of memset, memcpy, and memmove
  /// lowering. If DstAlign is zero, that means the destination alignment can
  /// satisfy any constraint. Similarly if SrcAlign is zero it means there
  /// isn't a need to check it against the alignment requirement, probably
  /// because the source does not need to be loaded. If 'IsZeroVal' is true,
  /// that means it's safe to return a non-scalar-integer type, e.g. empty
  /// string source, constant, or loaded from memory. 'MemcpyStrSrc' indicates
  /// whether the memcpy source is constant so it does not need to be loaded.
  /// It returns MVT::Other if the type should be determined using generic
  /// target-independent logic.
  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
                                  bool /*IsZeroVal*/,
                                  bool /*MemcpyStrSrc*/,
                                  MachineFunction &/*MF*/) const {
    return MVT::Other;
  }
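  // Illustrative sketch (a hypothetical override, not any particular
  // target's implementation): a target with fast unaligned 32-bit accesses
  // might steer small memcpy expansions toward i32:
  //
  //   virtual EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
  //                                   unsigned SrcAlign, bool IsZeroVal,
  //                                   bool MemcpyStrSrc,
  //                                   MachineFunction &MF) const {
  //     if (Size >= 4 && (DstAlign == 0 || DstAlign >= 4))
  //       return MVT::i32;
  //     return MVT::Other;  // defer to generic logic
  //   }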
  /// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
  /// to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// usesUnderscoreLongJmp - Determine if we should use _longjmp or longjmp
  /// to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// getStackPointerRegisterToSaveRestore - If a physical register, this
  /// specifies the register that llvm.stacksave/llvm.stackrestore should save
  /// and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// getExceptionPointerRegister - If a physical register, this returns
  /// the register that receives the exception address on entry to a landing
  /// pad.
  unsigned getExceptionPointerRegister() const {
    return ExceptionPointerRegister;
  }

  /// getExceptionSelectorRegister - If a physical register, this returns
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  unsigned getExceptionSelectorRegister() const {
    return ExceptionSelectorRegister;
  }

  /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never
  /// set, the default is 200)
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes
  /// (if never set, the default is 0)
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// getMinStackArgumentAlignment - return the minimum stack alignment of an
  /// argument.
  unsigned getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// getMinFunctionAlignment - return the minimum function alignment.
  ///
  unsigned getMinFunctionAlignment() const {
    return MinFunctionAlignment;
  }

  /// getPrefFunctionAlignment - return the preferred function alignment.
  ///
  unsigned getPrefFunctionAlignment() const {
    return PrefFunctionAlignment;
  }

  /// getPrefLoopAlignment - return the preferred loop alignment.
  ///
  unsigned getPrefLoopAlignment() const {
    return PrefLoopAlignment;
  }

  /// getShouldFoldAtomicFences - return whether the combiner should fold
  /// fence MEMBARRIER instructions into the atomic intrinsic instructions.
  ///
  bool getShouldFoldAtomicFences() const {
    return ShouldFoldAtomicFences;
  }

  /// getInsertFencesForAtomic - return whether the DAG builder should
  /// automatically insert fences and reduce ordering for atomics.
  ///
  bool getInsertFencesForAtomic() const {
    return InsertFencesForAtomic;
  }

  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                         SDValue &/*Offset*/,
                                         ISD::MemIndexedMode &/*AM*/,
                                         SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// getPostIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if this node can be
  /// combined with a load / store to form a post-indexed load / store.
  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                          SDValue &/*Base*/, SDValue &/*Offset*/,
                                          ISD::MemIndexedMode &/*AM*/,
                                          SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// getJumpTableEncoding - Return the entry encoding for a jump table in the
  /// current function.  The returned value is a member of the
  /// MachineJumpTableInfo::JTEntryKind enum.
  virtual unsigned getJumpTableEncoding() const;

  virtual const MCExpr *
  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
                            MCContext &/*Ctx*/) const {
    llvm_unreachable("Need to implement this hook if target has custom JTIs");
  }

  /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
  /// jumptable.
  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                           SelectionDAG &DAG) const;

  /// getPICJumpTableRelocBaseExpr - This returns the relocation base for the
  /// given PIC jumptable, the same as getPICJumpTableRelocBase, but as an
  /// MCExpr.
  virtual const MCExpr *
  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                               unsigned JTI, MCContext &Ctx) const;

  /// isOffsetFoldingLegal - Return true if folding a constant offset
  /// with the given GlobalAddress is legal.  It is frequently not legal in
  /// PIC relocation models.
  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

  /// getStackCookieLocation - Return true if the target stores stack
  /// protector cookies at a fixed offset in some non-standard address
  /// space, and populates the address space and offset as
  /// appropriate.
  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
                                      unsigned &/*Offset*/) const {
    return false;
  }

  /// getMaximalGlobalOffset - Returns the maximal possible offset which can be
  /// used for loads / stores from the global.
  virtual unsigned getMaximalGlobalOffset() const {
    return 0;
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Optimization Methods
  //

  /// TargetLoweringOpt - A convenience struct that encapsulates a DAG, and two
  /// SDValues for returning information from TargetLowering to its clients
  /// that want to combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;
    bool LegalTys;
    bool LegalOps;
    SDValue Old;
    SDValue New;

    explicit TargetLoweringOpt(SelectionDAG &InDAG,
                               bool LT, bool LO) :
      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}

    bool LegalTypes() const { return LegalTys; }
    bool LegalOperations() const { return LegalOps; }

    bool CombineTo(SDValue O, SDValue N) {
      Old = O;
      New = N;
      return true;
    }

    /// ShrinkDemandedConstant - Check to see if the specified operand of the
    /// specified instruction is a constant integer.  If so, check to see if
    /// there are any bits set in the constant that are not demanded.  If so,
    /// shrink the constant and return true.
    bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);

    /// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the
    /// casts are free.  This uses isZExtFree and ZERO_EXTEND for the widening
    /// cast, but it could be generalized for targets with other types of
    /// implicit widening casts.
    bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
                          DebugLoc dl);
  };

  /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
  /// DemandedMask bits of the result of Op are ever used downstream.  If we
  /// can use this information to simplify Op, create a new simplified DAG node
  /// and return true, returning the original and new nodes in Old and New.
  /// Otherwise, analyze the expression and return a mask of KnownOne and
  /// KnownZero bits for the expression (used to simplify the caller).
  /// The KnownZero/One bits may only be accurate for those bits in the
  /// DemandedMask.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                            APInt &KnownZero, APInt &KnownOne,
                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;
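  // Illustrative sketch (caller-side, an assumption): a target combine that
  // only needs the low 8 bits of Op can ask SimplifyDemandedBits to prune
  // the expression, then commit whatever rewrite was recorded in TLO:
  //
  //   TargetLoweringOpt TLO(DAG, /*LT=*/false, /*LO=*/false);
  //   APInt Demanded = APInt::getLowBitsSet(32, 8);
  //   APInt KnownZero, KnownOne;
  //   if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
  //     DCI.CommitTargetLoweringOpt(TLO);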
  /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
  /// Mask are known to be either zero or one and return them in the
  /// KnownZero/KnownOne bitsets.
  virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                              APInt &KnownZero,
                                              APInt &KnownOne,
                                              const SelectionDAG &DAG,
                                              unsigned Depth = 0) const;

  /// ComputeNumSignBitsForTargetNode - This method can be implemented by
  /// targets that want to expose additional information about sign bits to the
  /// DAG Combiner.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   unsigned Depth = 0) const;

  struct DAGCombinerInfo {
    void *DC;  // The DAG Combiner object.
    bool BeforeLegalize;
    bool BeforeLegalizeOps;
    bool CalledByLegalizer;
  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, bool bl, bool blo, bool cl, void *dc)
      : DC(dc), BeforeLegalize(bl), BeforeLegalizeOps(blo),
        CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return BeforeLegalize; }
    bool isBeforeLegalizeOps() const { return BeforeLegalizeOps; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    void AddToWorklist(SDNode *N);
    void RemoveFromWorklist(SDNode *N);
    SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
                      bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1,
                      bool AddTo = true);

    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
  };

  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
  /// and cc. If it is unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                        ISD::CondCode Cond, bool foldBooleans,
                        DAGCombinerInfo &DCI, DebugLoc dl) const;

  /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
  /// node is a GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

  /// PerformDAGCombine - This method will be invoked for all target nodes and
  /// for any target-independent nodes that the target has registered with
  /// invoke it for.
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0   - No change was made
  ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
  ///   otherwise          - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  ///
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
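  // Illustrative sketch (a hypothetical override, following the pattern the
  // comment above describes; combineADD is an assumed helper):
  //
  //   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
  //                                               DAGCombinerInfo &DCI) const {
  //     switch (N->getOpcode()) {
  //     default: break;
  //     case ISD::ADD: return combineADD(N, DCI);
  //     }
  //     return SDValue();  // no change was made
  //   }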
  /// isTypeDesirableForOp - Return true if the target has native support for
  /// the specified value type and it is 'desirable' to use the type for the
  /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
  /// instruction encodings are longer and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

  /// isDesirableToTransformToIntegerOp - Return true if it is profitable for
  /// dag combiner to transform a floating point op of specified opcode to
  /// an equivalent op of an integer type. e.g. f32 load -> i32 load can be
  /// profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// IsDesirableToPromoteOp - This method queries the target about whether it
  /// is beneficial for dag combiner to promote the specified node. If true, it
  /// should return the desired promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //

protected:
  /// setBooleanContents - Specify how the target extends the result of a
  /// boolean value from i1 to a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }
  /// setBooleanVectorContents - Specify how the target extends the result
  /// of a vector boolean value from a vector of i1 to a wider type.  See
  /// getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// setSchedulingPreference - Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// setUseUnderscoreSetJmp - Indicate whether this target prefers to
  /// use _setjmp to implement llvm.setjmp or the non _ version.
  /// Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// setUseUnderscoreLongJmp - Indicate whether this target prefers to
  /// use _longjmp to implement llvm.longjmp or the non _ version.
  /// Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }

  /// setStackPointerRegisterToSaveRestore - If set to a physical register,
  /// this specifies the register that llvm.stacksave/llvm.stackrestore should
  /// save and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// setExceptionPointerRegister - If set to a physical register, this sets
  /// the register that receives the exception address on entry to a landing
  /// pad.
  void setExceptionPointerRegister(unsigned R) {
    ExceptionPointerRegister = R;
  }

  /// setExceptionSelectorRegister - If set to a physical register, this sets
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  void setExceptionSelectorRegister(unsigned R) {
    ExceptionSelectorRegister = R;
  }

  /// setSelectIsExpensive - Tells the code generator not to expand operations
  /// into sequences that use the select operations if possible.
  void setSelectIsExpensive(bool isExpensive = true) {
    SelectIsExpensive = isExpensive;
  }

  /// setJumpIsExpensive - Tells the code generator not to expand a sequence of
  /// operations into separate sequences that increase the amount of flow
  /// control.
  void setJumpIsExpensive(bool isExpensive = true) {
    JumpIsExpensive = isExpensive;
  }

  /// setIntDivIsCheap - Tells the code generator that integer divide is cheap
  /// on this target. When false (the default), integer divide is treated as
  /// expensive and, if possible, is replaced by an alternate sequence of
  /// instructions not containing an integer divide.
  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }

  /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate
  /// srl/add/sra for a signed divide by power of two, and let the target
  /// handle it.
  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }

  /// addRegisterClass - Add the specified register class as an available
  /// regclass for the specified value type.  This indicates the selector can
  /// handle values of that class natively.
  void addRegisterClass(EVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;
  }

  /// findRepresentativeClass - Return the largest legal super-reg register
  /// class of the register class for the specified type and its associated
  /// "cost".
  virtual std::pair<const TargetRegisterClass*, uint8_t>
  findRepresentativeClass(EVT VT) const;

  /// computeRegisterProperties - Once all of the register classes are added,
  /// this allows us to compute derived properties we expose.
  void computeRegisterProperties();

  /// setOperationAction - Indicate that the specified operation does not work
  /// with the specified type and indicate what to do about it.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
  }

  /// setLoadExtAction - Indicate that the specified load with extension does
  /// not work with the specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT VT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
  }

  /// setTruncStoreAction - Indicate that the specified truncating store does
  /// not work with the specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
  }

  /// setIndexedLoadAction - Indicate that the specified indexed load does or
  /// does not work with the specified type and indicate what to do about
  /// it. NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
                            LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Load actions are kept in the upper half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
  }
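  // Illustrative sketch (a hypothetical target constructor showing how these
  // configuration methods are meant to be combined; the names and choices
  // are assumptions):
  //
  //   MyTargetLowering::MyTargetLowering(TargetMachine &TM)
  //     : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
  //     addRegisterClass(MVT::i32, MyTarget::GPRRegisterClass);
  //     setOperationAction(ISD::SDIV, MVT::i32, Expand);
  //     setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  //     setBooleanContents(ZeroOrOneBooleanContent);
  //     computeRegisterProperties();  // after all addRegisterClass calls
  //   }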
  /// setIndexedStoreAction - Indicate that the specified indexed store does or
  /// does not work with the specified type and indicate what to do about
  /// it. NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
                             LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Store actions are kept in the lower half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
  }

  /// setCondCodeAction - Indicate that the specified condition code is or
  /// isn't supported on the target and indicate what to do about it.
  void setCondCodeAction(ISD::CondCode CC, MVT VT,
                         LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE &&
           (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.SimpleTy*2);
    CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.SimpleTy*2;
  }

  /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
  /// promotion code defaults to trying a larger integer/fp until it can find
  /// one that works.  If that default is insufficient, this method can be used
  /// by the target to override the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }

  /// setTargetDAGCombine - Targets should invoke this method for each target
  /// independent node that they want to provide a custom DAG combiner for by
  /// implementing the PerformDAGCombine virtual method.
  void setTargetDAGCombine(ISD::NodeType NT) {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
  }
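  // Illustrative sketch (an assumption modeled on how in-tree targets pair
  // these two calls): promote an operation to a specific same-sized type
  // rather than letting the autopromotion search pick one:
  //
  //   setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
  //   AddPromotedToType(ISD::LOAD, MVT::v4i16, MVT::v2i32);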
  /// setJumpBufSize - Set the target's required jmp_buf buffer size (in
  /// bytes); default is 200
  void setJumpBufSize(unsigned Size) {
    JumpBufSize = Size;
  }

  /// setJumpBufAlignment - Set the target's required jmp_buf buffer
  /// alignment (in bytes); default is 0
  void setJumpBufAlignment(unsigned Align) {
    JumpBufAlignment = Align;
  }

  /// setMinFunctionAlignment - Set the target's minimum function alignment (in
  /// log2(bytes))
  void setMinFunctionAlignment(unsigned Align) {
    MinFunctionAlignment = Align;
  }

  /// setPrefFunctionAlignment - Set the target's preferred function alignment.
  /// This should be set if there is a performance benefit to
  /// higher-than-minimum alignment (in log2(bytes))
  void setPrefFunctionAlignment(unsigned Align) {
    PrefFunctionAlignment = Align;
  }

  /// setPrefLoopAlignment - Set the target's preferred loop alignment. A
  /// default alignment of zero means the target does not care about loop
  /// alignment. The alignment is specified in log2(bytes).
  void setPrefLoopAlignment(unsigned Align) {
    PrefLoopAlignment = Align;
  }

  /// setMinStackArgumentAlignment - Set the minimum stack alignment of an
  /// argument (in log2(bytes)).
  void setMinStackArgumentAlignment(unsigned Align) {
    MinStackArgumentAlignment = Align;
  }

  /// setShouldFoldAtomicFences - Set if the target's implementation of the
  /// atomic operation intrinsics includes locking. Default is false.
  void setShouldFoldAtomicFences(bool fold) {
    ShouldFoldAtomicFences = fold;
  }

  /// setInsertFencesForAtomic - Set if the DAG builder should
  /// automatically insert fences and reduce the order of atomic memory
  /// operations to Monotonic.
  void setInsertFencesForAtomic(bool fence) {
    InsertFencesForAtomic = fence;
  }

public:
  //===--------------------------------------------------------------------===//
  // Lowering methods - These methods must be implemented by targets so that
  // the SelectionDAGLowering code knows how to lower these.
  //

  /// LowerFormalArguments - This hook must be implemented to lower the
  /// incoming (formal) arguments, described by the Ins array, into the
  /// specified DAG. The implementation should fill in the InVals array
  /// with legal-type argument values, and return the resulting token
  /// chain value.
  ///
  virtual SDValue
    LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                         bool /*isVarArg*/,
                         const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
                         DebugLoc /*dl*/, SelectionDAG &/*DAG*/,
                         SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// LowerCallTo - This function lowers an abstract call to a function into an
  /// actual call.  This returns a pair of operands.  The first element is the
  /// return value for the function (if RetTy is not VoidTy).  The second
  /// element is the outgoing token chain. It calls LowerCall to do the actual
  /// lowering.
  struct ArgListEntry {
    SDValue Node;
    Type* Ty;
    bool isSExt  : 1;
    bool isZExt  : 1;
    bool isInReg : 1;
    bool isSRet  : 1;
    bool isNest  : 1;
    bool isByVal : 1;
    uint16_t Alignment;

    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
      isSRet(false), isNest(false), isByVal(false), Alignment(0) { }
  };
  typedef std::vector<ArgListEntry> ArgListTy;
  std::pair<SDValue, SDValue>
  LowerCallTo(SDValue Chain, Type *RetTy, bool RetSExt, bool RetZExt,
              bool isVarArg, bool isInreg, unsigned NumFixedArgs,
              CallingConv::ID CallConv, bool isTailCall,
              bool doesNotRet, bool isReturnValueUsed,
              SDValue Callee, ArgListTy &Args,
              SelectionDAG &DAG, DebugLoc dl) const;

  /// LowerCall - This hook must be implemented to lower calls into the
  /// specified DAG. The outgoing arguments to the call are described
  /// by the Outs array, and the values to be returned by the call are
  /// described by the Ins array. The implementation should fill in the
  /// InVals array with legal-type return values from the call, and return
  /// the resulting token chain value.
  virtual SDValue
    LowerCall(SDValue /*Chain*/, SDValue /*Callee*/,
              CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
              bool /*doesNotRet*/, bool &/*isTailCall*/,
              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
              const SmallVectorImpl<SDValue> &/*OutVals*/,
              const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
              DebugLoc /*dl*/, SelectionDAG &/*DAG*/,
              SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// HandleByVal - Target-specific cleanup for formal ByVal parameters.
  virtual void HandleByVal(CCState *, unsigned &) const {}

  /// CanLowerReturn - This hook should be implemented to check whether the
  /// return values described by the Outs array can fit into the return
  /// registers.  If false is returned, an sret-demotion is performed.
  ///
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
               const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
               LLVMContext &/*Context*/) const
  {
    // Return true by default to get preexisting behavior.
    return true;
  }

  /// LowerReturn - This hook must be implemented to lower outgoing
  /// return values, described by the Outs array, into the specified
  /// DAG. The implementation should return the resulting token chain
  /// value.
  ///
  virtual SDValue
    LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                bool /*isVarArg*/,
                const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                const SmallVectorImpl<SDValue> &/*OutVals*/,
                DebugLoc /*dl*/, SelectionDAG &/*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// isUsedByReturnOnly - Return true if the result of the specified node is
  /// used by a return node only. It also computes and returns the input chain
  /// for the tail call.
  /// This is used to determine whether it is possible to codegen a libcall as
  /// tail call at legalization time.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &Chain) const {
    return false;
  }

  /// mayBeEmittedAsTailCall - Return true if the target may be able to emit
  /// the call instruction as a tail call. This is used by optimization passes
  /// to determine if it's profitable to duplicate return instructions to
  /// enable tailcall optimization.
  virtual bool mayBeEmittedAsTailCall(CallInst *) const {
    return false;
  }

  /// getTypeForExtArgOrReturn - Return the type that should be used to zero or
  /// sign extend a zeroext/signext integer argument or return value.
  /// FIXME: Most C calling conventions require the return type to be promoted,
  /// but this is not true all the time, e.g. i1 on x86-64. It is also not
  /// necessary for non-C calling conventions. The frontend should handle this
  /// and include all of the necessary information.
  virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
                                       ISD::NodeType /*ExtendKind*/) const {
    EVT MinVT = getRegisterType(Context, MVT::i32);
    return VT.bitsLT(MinVT) ? MinVT : VT;
  }
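  // Worked example (following the default implementation above): for a
  // signext i8 return value on a target whose i32 register type is i32,
  // MinVT == i32 and i8 is smaller, so the value is extended to i32; an i64
  // value is not smaller than MinVT and is returned unchanged.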
  /// LowerOperationWrapper - This callback is invoked by the type legalizer
  /// to legalize nodes with an illegal operand type but legal result types.
  /// It replaces the LowerOperation callback in the type Legalizer.
  /// The reason we cannot do away with LowerOperation entirely is that
  /// LegalizeDAG isn't yet ready to use this callback.
  /// TODO: Consider merging with ReplaceNodeResults.
  ///
  /// The target places new result values for the node in Results (their number
  /// and types must exactly match those of the original return values of
  /// the node), or leaves Results empty, which indicates that the node is not
  /// to be custom lowered after all.
  /// The default implementation calls LowerOperation.
  virtual void LowerOperationWrapper(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) const;

  /// LowerOperation - This callback is invoked for operations that are
  /// unsupported by the target, which are registered to use 'custom' lowering,
  /// and whose defined values are all legal.
  /// If the target has no operations that require custom lowering, it need not
  /// implement this.  The default implementation of this aborts.
  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

  /// ReplaceNodeResults - This callback is invoked when a node result type is
  /// illegal for the target, and the operation was registered to use 'custom'
  /// lowering for that result type.  The target places new result values for
  /// the node in Results (their number and types must exactly match those of
  /// the original return values of the node), or leaves Results empty, which
  /// indicates that the node is not to be custom lowered after all.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this.  The default implementation aborts.
  virtual void ReplaceNodeResults(SDNode * /*N*/,
                                  SmallVectorImpl<SDValue> &/*Results*/,
                                  SelectionDAG &/*DAG*/) const {
    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
  }

  /// getTargetNodeName() - This method returns the name of a target specific
  /// DAG node.
  virtual const char *getTargetNodeName(unsigned Opcode) const;

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  virtual FastISel *createFastISel(FunctionLoweringInfo &) const {
    return 0;
  }

  //===--------------------------------------------------------------------===//
  // Inline Asm Support hooks
  //

  /// ExpandInlineAsm - This hook allows the target to expand an inline asm
  /// call to be explicit llvm code if it wants to.  This is useful for
  /// turning simple inline asms into LLVM intrinsics, which gives the
  /// compiler more information about the behavior of the code.
  virtual bool ExpandInlineAsm(CallInst *) const {
    return false;
  }

  enum ConstraintType {
    C_Register,            // Constraint represents specific register(s).
    C_RegisterClass,       // Constraint represents any of register(s) in class.
    C_Memory,              // Memory constraint.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };

  enum ConstraintWeight {
    // Generic weights.
    CW_Invalid  = -1,     // No match.
    CW_Okay     = 0,      // Acceptable.
    CW_Good     = 1,      // Good weight.
    CW_Better   = 2,      // Better weight.
    CW_Best     = 3,      // Best weight.

    // Well-known weights.
    CW_SpecificReg  = CW_Okay,    // Specific register operands.
    CW_Register     = CW_Good,    // Register operands.
    CW_Memory       = CW_Better,  // Memory operands.
    CW_Constant     = CW_Best,    // Constant operand.
    CW_Default      = CW_Okay     // Default or don't know type.
  };
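
  // Example (illustrative sketch): a target-specific weight override that
  // prefers an immediate for a hypothetical 'I' constraint.  FooTargetLowering
  // is an assumed backend class, not part of this interface;
  // getSingleConstraintMatchWeight is declared below.
  //
  //   TargetLowering::ConstraintWeight
  //   FooTargetLowering::getSingleConstraintMatchWeight(
  //       AsmOperandInfo &info, const char *constraint) const {
  //     Value *CallOperandVal = info.CallOperandVal;
  //     if (!CallOperandVal)             // No operand to weigh.
  //       return CW_Default;
  //     switch (*constraint) {
  //     case 'I':                        // Small immediate, e.g. 0..31.
  //       if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal))
  //         if (C->getZExtValue() < 32)
  //           return CW_Constant;
  //       return CW_Invalid;             // Not a matching immediate.
  //     default:
  //       return TargetLowering::getSingleConstraintMatchWeight(info,
  //                                                             constraint);
  //     }
  //   }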
  /// AsmOperandInfo - This contains information for each constraint that we
  /// are lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// ConstraintCode - This contains the actual string for the code, like
    /// "m".  TargetLowering picks the 'best' code from ConstraintInfo::Codes
    /// that most closely matches the operand.
    std::string ConstraintCode;

    /// ConstraintType - Information about the constraint code, e.g. Register,
    /// RegisterClass, Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType;

    /// CallOperandVal - If this is the result output operand or a
    /// clobber, this is null, otherwise it is the incoming operand to the
    /// CallInst.  This gets modified as the asm is processed.
    Value *CallOperandVal;

    /// ConstraintVT - The ValueType for the operand value.
    EVT ConstraintVT;

    /// isMatchingInputConstraint - Return true if this is an input operand
    /// that is a matching constraint like "4".
    bool isMatchingInputConstraint() const;

    /// getMatchedOperand - If this is an input matching constraint, this
    /// method returns the output operand it matches.
    unsigned getMatchedOperand() const;

    /// Copy constructor for copying from an AsmOperandInfo.
    AsmOperandInfo(const AsmOperandInfo &info)
      : InlineAsm::ConstraintInfo(info),
        ConstraintCode(info.ConstraintCode),
        ConstraintType(info.ConstraintType),
        CallOperandVal(info.CallOperandVal),
        ConstraintVT(info.ConstraintVT) {
    }

    /// Converting constructor for initializing from a ConstraintInfo.
    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
      : InlineAsm::ConstraintInfo(info),
        ConstraintType(TargetLowering::C_Unknown),
        CallOperandVal(0), ConstraintVT(MVT::Other) {
    }
  };

  typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;

  /// ParseConstraints - Split up the constraint string from the inline
  /// assembly value into the specific constraints and their prefixes,
  /// and also tie in the associated operand values.
  /// If this returns an empty vector, and if the constraint string itself
  /// isn't empty, there was an error parsing.
  virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;

  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

  /// ComputeConstraintToUse - Determines the constraint code and constraint
  /// type to use for the specific AsmOperandInfo, setting
  /// OpInfo.ConstraintCode and OpInfo.ConstraintType.  If the actual operand
  /// being passed in is available, it can be passed in as Op, otherwise an
  /// empty SDValue can be passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = 0) const;

  /// getConstraintType - Given a constraint, return the type of constraint it
  /// is for this target.
  virtual ConstraintType getConstraintType(const std::string &Constraint) const;
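
  // Example (illustrative sketch): classifying single-letter constraints in a
  // getConstraintType override.  FooTargetLowering is an assumed backend
  // class, not part of this interface.
  //
  //   TargetLowering::ConstraintType
  //   FooTargetLowering::getConstraintType(const std::string &Constraint) const {
  //     if (Constraint.size() == 1) {
  //       switch (Constraint[0]) {
  //       case 'r': return C_RegisterClass;  // Any general register.
  //       case 'm': return C_Memory;         // Memory operand.
  //       case 'I': return C_Other;          // Target-specific immediate.
  //       default: break;
  //       }
  //     }
  //     return TargetLowering::getConstraintType(Constraint);
  //   }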
  /// getRegForInlineAsmConstraint - Given a physical register constraint (e.g.
  /// {edx}), return the register number and the register class for the
  /// register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register number of 0 and the register
  /// class pointer.
  ///
  /// This should only be used for C_Register constraints.  On error,
  /// this returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 EVT VT) const;

  /// LowerXConstraint - Try to replace an X constraint, which matches
  /// anything, with another that has more specific requirements based on the
  /// type of the corresponding operand.  This returns null if there is no
  /// replacement to make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector.  If it is invalid, don't add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// EmitInstrWithCustomInserter - This method should be implemented by
  /// targets that mark instructions with the 'usesCustomInserter' flag.  These
  /// instructions are special in various ways, which require special support
  /// to insert.  The specified MachineInstr is created but not inserted into
  /// any basic blocks, and this method is called to expand it into a sequence
  /// of instructions, potentially also creating new basic blocks and control
  /// flow.
  virtual MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;

  /// AdjustInstrPostInstrSelection - This method should be implemented by
  /// targets that mark instructions with the 'hasPostISelHook' flag.  These
  /// instructions must be adjusted after instruction selection by target
  /// hooks, e.g. to fill in optional defs for ARM 's'-setting instructions.
  virtual void
  AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;

  //===--------------------------------------------------------------------===//
  // Addressing mode description hooks (used by LSR etc).
  //

  /// AddrMode - This represents an addressing mode of:
  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  /// If BaseGV is null, there is no BaseGV.
  /// If BaseOffs is zero, there is no base offset.
  /// If HasBaseReg is false, there is no base register.
  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
  /// no scale.
  ///
  struct AddrMode {
    GlobalValue *BaseGV;
    int64_t      BaseOffs;
    bool         HasBaseReg;
    int64_t      Scale;
    AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
  };
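
  // Example (illustrative sketch): querying whether an x86-style
  // [base + 4*index + 8] address is legal for an i32 access.
  // isLegalAddressingMode is declared below; TLI and Int32Ty (an i32 Type*)
  // are assumed to be in scope.
  //
  //   TargetLowering::AddrMode AM;
  //   AM.BaseGV     = 0;       // No global base.
  //   AM.BaseOffs   = 8;       // Constant displacement.
  //   AM.HasBaseReg = true;    // One base register...
  //   AM.Scale      = 4;       // ...plus a register scaled by 4.
  //   if (TLI.isLegalAddressingMode(AM, Int32Ty)) {
  //     // The whole computation can be folded into the load/store.
  //   }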
  /// GetAddrModeArguments - CodeGenPrepare sinks address calculations into the
  /// same BB as Load/Store instructions reading the address.  This allows as
  /// much computation as possible to be done in the address mode for that
  /// operand.  This hook lets targets also pass back when this should be done
  /// on intrinsics which load/store.
  virtual bool GetAddrModeArguments(IntrinsicInst *I,
                                    SmallVectorImpl<Value*> &Ops,
                                    Type *&AccessTy) const {
    return false;
  }

  /// isLegalAddressingMode - Return true if the addressing mode represented by
  /// AM is legal for this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// TODO: Handle pre/postinc as well.
  virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;

  /// isLegalICmpImmediate - Return true if the specified immediate is a legal
  /// icmp immediate, that is the target has icmp instructions which can
  /// compare a register against the immediate without having to materialize
  /// the immediate into a register.
  virtual bool isLegalICmpImmediate(int64_t) const {
    return true;
  }

  /// isLegalAddImmediate - Return true if the specified immediate is a legal
  /// add immediate, that is the target has add instructions which can add
  /// a register with the immediate without having to materialize the
  /// immediate into a register.
  virtual bool isLegalAddImmediate(int64_t) const {
    return true;
  }

  /// isTruncateFree - Return true if it's free to truncate a value of
  /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
  /// register EAX to i16 by referencing its sub-register AX.
  virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }

  virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  /// isZExtFree - Return true if any actual instruction that defines a
  /// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
  /// register.  This does not necessarily include registers defined in
  /// unknown ways, such as incoming arguments, or copies from unknown
  /// virtual registers.  Also, if isTruncateFree(Ty2, Ty1) is true, this
  /// does not necessarily apply to truncate instructions.  e.g. on x86-64,
  /// all instructions that define 32-bit values implicitly zero-extend the
  /// result out to 64 bits.
  virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }

  virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  /// isFNegFree - Return true if an fneg operation is free to the point where
  /// it is never worthwhile to replace it with a bitwise operation.
  virtual bool isFNegFree(EVT) const {
    return false;
  }

  /// isFAbsFree - Return true if an fabs operation is free to the point where
  /// it is never worthwhile to replace it with a bitwise operation.
  virtual bool isFAbsFree(EVT) const {
    return false;
  }
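
  // Example (illustrative sketch): a RISC-like target whose compare and add
  // instructions only encode 12-bit signed immediates might override the
  // isLegalICmpImmediate and isLegalAddImmediate hooks above as follows.
  // FooTargetLowering is an assumed backend class.
  //
  //   bool FooTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  //     return Imm >= -2048 && Imm <= 2047;   // Fits in 12 signed bits.
  //   }
  //   bool FooTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  //     return Imm >= -2048 && Imm <= 2047;   // Same encoding constraint.
  //   }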
  /// isNarrowingProfitable - Return true if it's profitable to narrow
  /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
  /// from i32 to i8 but not from i32 to i16.
  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //
  SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, DebugLoc dl,
                         SelectionDAG &DAG) const;
  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    std::vector<SDNode*>* Created) const;
  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    std::vector<SDNode*>* Created) const;


  //===--------------------------------------------------------------------===//
  // Runtime Library hooks
  //

  /// setLibcallName - Rename the default libcall routine name for the
  /// specified libcall.
  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
    LibcallRoutineNames[Call] = Name;
  }

  /// getLibcallName - Get the libcall routine name for the specified libcall.
  ///
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }

  /// setCmpLibcallCC - Override the default CondCode to be used to test the
  /// result of the comparison libcall against zero.
  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
    CmpLibcallCCs[Call] = CC;
  }

  /// getCmpLibcallCC - Get the CondCode that's to be used to test the result
  /// of the comparison libcall against zero.
  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
    return CmpLibcallCCs[Call];
  }

  /// setLibcallCallingConv - Set the CallingConv that should be used for the
  /// specified libcall.
  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
    LibcallCallingConvs[Call] = CC;
  }

  /// getLibcallCallingConv - Get the CallingConv that should be used for the
  /// specified libcall.
  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
    return LibcallCallingConvs[Call];
  }
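
  // Example (illustrative sketch): a target constructor redirecting a libcall
  // to a platform-specific routine and adjusting how a comparison libcall's
  // result is tested.  The routine name "__foo_memcpy" is hypothetical.
  //
  //   setLibcallName(RTLIB::MEMCPY, "__foo_memcpy");
  //   setLibcallCallingConv(RTLIB::MEMCPY, CallingConv::C);
  //   // Test the result of the f32 inequality libcall against zero with
  //   // SETNE rather than the default condition code.
  //   setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);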

private:
  const TargetMachine &TM;
  const TargetData *TD;
  const TargetLoweringObjectFile &TLOF;

  /// We are in the process of implementing a new TypeLegalization action
  /// which is the promotion of vector elements. This feature is under
  /// development. Until this feature is complete, it is only enabled using a
  /// flag. We pass this flag using a member because of circular dependency
  /// issues. This member will be removed with the flag once we complete the
  /// transition.
  bool mayPromoteElements;

  /// PointerTy - The type to use for pointers, usually i32 or i64.
  ///
  MVT PointerTy;

  /// IsLittleEndian - True if this is a little endian target.
  ///
  bool IsLittleEndian;

  /// SelectIsExpensive - Tells the code generator not to expand operations
  /// into sequences that use the select operations if possible.
  bool SelectIsExpensive;

  /// IntDivIsCheap - Tells the code generator not to expand integer divides by
  /// constants into a sequence of muls, adds, and shifts.  This is a hack
  /// until a real cost model is in place.  If we ever optimize for size, this
  /// will be set to true unconditionally.
  bool IntDivIsCheap;

  /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate
  /// srl/add/sra for a signed divide by power of two, and let the target
  /// handle it.
  bool Pow2DivIsCheap;

  /// JumpIsExpensive - Tells the code generator that it shouldn't generate
  /// extra flow control instructions and should attempt to combine flow
  /// control instructions via predication.
  bool JumpIsExpensive;

  /// UseUnderscoreSetJmp - This target prefers to use _setjmp to implement
  /// llvm.setjmp.  Defaults to false.
  bool UseUnderscoreSetJmp;

  /// UseUnderscoreLongJmp - This target prefers to use _longjmp to implement
  /// llvm.longjmp.  Defaults to false.
  bool UseUnderscoreLongJmp;

  /// BooleanContents - Information about the contents of the high-bits in
  /// boolean values held in a type wider than i1.  See getBooleanContents.
  BooleanContent BooleanContents;

  /// BooleanVectorContents - Information about the contents of the high-bits
  /// in boolean vector values when the element type is wider than i1.  See
  /// getBooleanContents.
  BooleanContent BooleanVectorContents;

  /// SchedPreferenceInfo - The target scheduling preference: shortest possible
  /// total cycles or lowest register usage.
  Sched::Preference SchedPreferenceInfo;

  /// JumpBufSize - The size, in bytes, of the target's jmp_buf buffers.
  unsigned JumpBufSize;

  /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
  /// buffers.
  unsigned JumpBufAlignment;

  /// MinStackArgumentAlignment - The minimum alignment that any argument
  /// on the stack needs to have.
  ///
  unsigned MinStackArgumentAlignment;

  /// MinFunctionAlignment - The minimum function alignment (used when
  /// optimizing for size, and to prevent explicitly provided alignment
  /// from leading to incorrect code).
  ///
  unsigned MinFunctionAlignment;

  /// PrefFunctionAlignment - The preferred function alignment (used when
  /// alignment unspecified and optimizing for speed).
  ///
  unsigned PrefFunctionAlignment;

  /// PrefLoopAlignment - The preferred loop alignment.
  ///
  unsigned PrefLoopAlignment;

  /// ShouldFoldAtomicFences - Whether fencing MEMBARRIER instructions should
  /// be folded into the enclosed atomic intrinsic instruction by the
  /// combiner.
  bool ShouldFoldAtomicFences;

  /// InsertFencesForAtomic - Whether the DAG builder should automatically
  /// insert fences and reduce ordering for atomics.  (This will be set for
  /// most architectures with weak memory ordering.)
  bool InsertFencesForAtomic;

  /// StackPointerRegisterToSaveRestore - If set to a physical register, this
  /// specifies the register that llvm.stacksave/llvm.stackrestore should save
  /// and restore.
  unsigned StackPointerRegisterToSaveRestore;

  /// ExceptionPointerRegister - If set to a physical register, this specifies
  /// the register that receives the exception address on entry to a landing
  /// pad.
  unsigned ExceptionPointerRegister;

  /// ExceptionSelectorRegister - If set to a physical register, this specifies
  /// the register that receives the exception typeid on entry to a landing
  /// pad.
  unsigned ExceptionSelectorRegister;

  /// RegClassForVT - This indicates the default register class to use for
  /// each ValueType the target supports natively.
  const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
  EVT RegisterTypeForVT[MVT::LAST_VALUETYPE];

  /// RepRegClassForVT - This indicates the "representative" register class to
  /// use for each ValueType the target supports natively.  This information is
  /// used by the scheduler to track register pressure.  By default, the
  /// representative register class is the largest legal super-reg register
  /// class of the register class of the specified type.  e.g. On x86, i8, i16,
  /// and i32's representative class would be GR32.
  const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];

  /// RepRegClassCostForVT - This indicates the "cost" of the "representative"
  /// register class for each ValueType.  The cost is used by the scheduler to
  /// approximate register pressure.
  uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];

  /// TransformToType - For any value types we are promoting or expanding, this
  /// contains the value type that we are changing to.  For Expanded types,
  /// this contains one step of the expand (e.g. i64 -> i32), even if there are
  /// multiple steps required (e.g. i64 -> i16).  For types natively supported
  /// by the system, this holds the same type (e.g. i32 -> i32).
  EVT TransformToType[MVT::LAST_VALUETYPE];

  /// OpActions - For each operation and each value type, keep a LegalizeAction
  /// that indicates how instruction selection should deal with the operation.
  /// Most operations are Legal (aka, supported natively by the target), but
  /// operations that are not should be described.  Note that operations on
  /// non-legal value types are not described here.
  uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];

  /// LoadExtActions - For each load extension type and each value type, keep a
  /// LegalizeAction that indicates how instruction selection should deal with
  /// a load of a specific value type and extension type.
  uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];

  /// TruncStoreActions - For each value type pair keep a LegalizeAction that
  /// indicates whether a truncating store of a specific value type and
  /// truncating type is legal.
  uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];

  /// IndexedModeActions - For each indexed mode and each value type, keep a
  /// pair of LegalizeAction that indicates how instruction selection should
  /// deal with the load / store.  The first dimension is the value_type for
  /// the reference.  The second dimension represents the various modes for
  /// load / store.
  uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];

  /// CondCodeActions - For each condition code (ISD::CondCode) keep a
  /// LegalizeAction that indicates how instruction selection should
  /// deal with the condition code.
  uint64_t CondCodeActions[ISD::SETCC_INVALID];

  ValueTypeActionImpl ValueTypeActions;

  typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
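
  // For illustration, getTypeConversion (below) maps a requested type to one
  // legalization step.  On a hypothetical target where i32 and <4 x float>
  // are the widest legal types, it would produce, for example:
  //
  //   i32         -> (TypeLegal,           i32)          // Already legal.
  //   i64         -> (TypeExpandInteger,   i32)          // Split in half.
  //   i17         -> (TypePromoteInteger,  i32)          // Round up.
  //   <1 x float> -> (TypeScalarizeVector, float)        // One element.
  //   <3 x float> -> (TypeWidenVector,     <4 x float>)  // Pad to pow2.
  //   <8 x float> -> (TypeSplitVector,     <4 x float>)  // Halve.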
  LegalizeKind
  getTypeConversion(LLVMContext &Context, EVT VT) const {
    // If this is a simple type, use the ComputeRegisterProp mechanism.
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
             array_lengthof(TransformToType));
      EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
      LegalizeTypeAction LA = ValueTypeActions.getTypeAction(VT.getSimpleVT());

      assert(
        (!(NVT.isSimple() && LA != TypeLegal) ||
         ValueTypeActions.getTypeAction(NVT.getSimpleVT()) != TypePromoteInteger)
         && "Promote may not follow Expand or Promote");

      return LegalizeKind(LA, NVT);
    }

    // Handle Extended Scalar Types.
    if (!VT.isVector()) {
      assert(VT.isInteger() && "Float types must be simple");
      unsigned BitSize = VT.getSizeInBits();
      // First promote to a power-of-two size, then expand if necessary.
      if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
        EVT NVT = VT.getRoundIntegerType(Context);
        assert(NVT != VT && "Unable to round integer VT");
        LegalizeKind NextStep = getTypeConversion(Context, NVT);
        // Avoid multi-step promotion.
        if (NextStep.first == TypePromoteInteger) return NextStep;
        // Return rounded integer type.
        return LegalizeKind(TypePromoteInteger, NVT);
      }

      return LegalizeKind(TypeExpandInteger,
                          EVT::getIntegerVT(Context, VT.getSizeInBits()/2));
    }

    // Handle vector types.
    unsigned NumElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();

    // Vectors with only one element are always scalarized.
    if (NumElts == 1)
      return LegalizeKind(TypeScalarizeVector, EltVT);

    // If we allow the promotion of vector elements using a flag,
    // then try to widen vector elements until a legal type is found.
    if (mayPromoteElements && EltVT.isInteger()) {
      // Vectors with a number of elements that is not a power of two are
      // always widened, for example <3 x float> -> <4 x float>.
      if (!VT.isPow2VectorType()) {
        NumElts = (unsigned)NextPowerOf2(NumElts);
        EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
        return LegalizeKind(TypeWidenVector, NVT);
      }

      // Examine the element type.
      LegalizeKind LK = getTypeConversion(Context, EltVT);

      // If type is to be expanded, split the vector.
      //  <4 x i140> -> <2 x i140>
      if (LK.first == TypeExpandInteger)
        return LegalizeKind(TypeSplitVector,
                            EVT::getVectorVT(Context, EltVT, NumElts / 2));

      // Promote the integer element types until a legal vector type is found
      // or until the element integer type is too big.  If a legal type was not
      // found, fall back to the usual mechanism of widening/splitting the
      // vector.
      while (1) {
        // Increase the bitwidth of the element to the next power-of-two
        // (which is greater than 8 bits).
        EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits()
                                  ).getRoundIntegerType(Context);

        // Stop trying when getting a non-simple element type.
        // Note that vector elements may be greater than legal vector element
        // types.  Example: X86 XMM registers hold 64-bit elements on 32-bit
        // systems.
        if (!EltVT.isSimple()) break;

        // Build a new vector type and check if it is legal.
        MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
        // Found a legal promoted vector type.
        if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
          return LegalizeKind(TypePromoteInteger,
                              EVT::getVectorVT(Context, EltVT, NumElts));
      }
    }
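
    // For illustration: with element promotion enabled, a non-simple type
    // such as <4 x i20> first has its element type promoted (i20 -> i32 in
    // the loop above); if the resulting <4 x i32> is legal for the target,
    // the whole vector is returned as (TypePromoteInteger, <4 x i32>)
    // instead of being widened or split below.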
    // Try to widen the vector until a legal type is found.
    // If there is no wider legal type, split the vector.
    while (1) {
      // Round up to the next power of 2.
      NumElts = (unsigned)NextPowerOf2(NumElts);

      // If there is no simple vector type with this many elements then there
      // cannot be a larger legal vector type.  Note that this assumes that
      // there are no skipped intermediate vector types in the simple types.
      if (!EltVT.isSimple()) break;
      MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      if (LargerVector == MVT()) break;

      // If this type is legal then widen the vector.
      if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
        return LegalizeKind(TypeWidenVector, LargerVector);
    }

    // Widen odd vectors to next power of two.
    if (!VT.isPow2VectorType()) {
      EVT NVT = VT.getPow2VectorType(Context);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Vectors with illegal element types are expanded.
    EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
    return LegalizeKind(TypeSplitVector, NVT);
  }

  std::vector<std::pair<EVT, const TargetRegisterClass*> > AvailableRegClasses;

  /// TargetDAGCombineArray - Targets can specify ISD nodes that they would
  /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),
  /// which sets a bit in this array.
  unsigned char
  TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];

  /// PromoteToType - For operations that must be promoted to a specific type,
  /// this holds the destination type.  This map should be sparse, so don't
  /// hold it as an array.
  ///
  /// Targets add entries to this map with AddPromotedToType(..), clients
  /// access this with getTypeToPromoteTo(..).
  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
    PromoteToType;

  /// LibcallRoutineNames - Stores the name of each libcall.
  ///
  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];

  /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result
  /// of each of the comparison libcalls against zero.
  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];

  /// LibcallCallingConvs - Stores the CallingConv that should be used for each
  /// libcall.
  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];

protected:
  /// When lowering \@llvm.memset this field specifies the maximum number of
  /// store operations that may be substituted for the call to memset.  Targets
  /// must set this value based on the cost threshold for that target.  Targets
  /// should assume that the memset will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions.  For example, storing 9 bytes on a 32-bit machine
  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
  /// store.  This only applies to setting a constant array of a constant size.
  /// @brief Specify maximum number of store instructions per memset call.
  unsigned maxStoresPerMemset;
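
  // Example (illustrative sketch): a target constructor tuning these
  // thresholds (the OptSize variants are declared below).  The values shown
  // are hypothetical, not recommendations.
  //
  //   maxStoresPerMemset = 16;         // Inline memset up to 16 stores.
  //   maxStoresPerMemsetOptSize = 8;   // Be more conservative under -Os.
  //   maxStoresPerMemcpy = 8;
  //   maxStoresPerMemcpyOptSize = 4;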
  /// Maximum number of store operations that may be substituted for the call
  /// to memset, used for functions with OptSize attribute.
  unsigned maxStoresPerMemsetOptSize;

  /// When lowering \@llvm.memcpy this field specifies the maximum number of
  /// store operations that may be substituted for a call to memcpy.  Targets
  /// must set this value based on the cost threshold for that target.  Targets
  /// should assume that the memcpy will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions.  For example, storing 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
  /// and one 1-byte store.  This only applies to copying a constant array of
  /// constant size.
  /// @brief Specify maximum number of store instructions per memcpy call.
  unsigned maxStoresPerMemcpy;

  /// Maximum number of store operations that may be substituted for a call
  /// to memcpy, used for functions with OptSize attribute.
  unsigned maxStoresPerMemcpyOptSize;

  /// When lowering \@llvm.memmove this field specifies the maximum number of
  /// store instructions that may be substituted for a call to memmove.
  /// Targets must set this value based on the cost threshold for that target.
  /// Targets should assume that the memmove will be done using as many of the
  /// largest store operations first, followed by smaller ones, if necessary,
  /// per alignment restrictions.  For example, moving 9 bytes on a 32-bit
  /// machine with 8-bit alignment would result in nine 1-byte stores.  This
  /// only applies to copying a constant array of constant size.
  /// @brief Specify maximum number of store instructions per memmove call.
  unsigned maxStoresPerMemmove;

  /// Maximum number of store instructions that may be substituted for a call
  /// to memmove, used for functions with OptSize attribute.
  unsigned maxStoresPerMemmoveOptSize;

  /// This field specifies whether the target can benefit from code placement
  /// optimization.
  bool benefitFromCodePlacementOpt;

private:
  /// isLegalRC - Return true if the value types that can be represented by the
  /// specified register class are all legal.
  bool isLegalRC(const TargetRegisterClass *RC) const;

  /// hasLegalSuperRegRegClasses - Return true if the specified register class
  /// has one or more super-reg register classes that are legal.
  bool hasLegalSuperRegRegClasses(const TargetRegisterClass *RC) const;
};

/// GetReturnInfo - Given an LLVM IR type and return type attributes,
/// compute the return value EVTs and flags, and optionally also
/// the offsets, if the return value is being lowered to memory.
void GetReturnInfo(Type* ReturnType, Attributes attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI,
                   SmallVectorImpl<uint64_t> *Offsets = 0);

} // end llvm namespace

#endif