//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H

#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"

namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.
      BSF,
      BSR,

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// FAND - Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.
      FXOR,

      /// FANDN - Bitwise logical ANDNOT of floating point values. This
      /// corresponds to X86::ANDNPS or X86::ANDNPD.
      FANDN,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,

      /// CALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of this node are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
      ///
      CALL,

      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter.
      RDTSC_DAG,

      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 bit-test instructions.
      BT,

      /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
      /// operand, usually produced by a CMP instruction.
      SETCC,

      // Same as SETCC except it's materialized with an sbb and the value is
      // all ones or all zeros.
      SETCC_CARRY,  // R = carry_bit ? ~0 : 0

      /// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
      /// Operands are two FP values to compare; result is a mask of
      /// 0s or 1s. Generally DTRT for C/C++ with NaNs.
      FSETCCss, FSETCCsd,

      /// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
      /// producing the result in an integer GPR. Needs masking for scalar
      /// result.
      FGETSIGNx86,
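
      // As a concrete illustration of the masking note above: with SSE,
      // "movmskps %xmm0, %eax" packs the four sign bits of a <4 x float>
      // into bits 3:0 of %eax, so a scalar FGETSIGN only needs bit 0 of
      // that result.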

      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction. It also writes a
      /// flag result.
      CMOV,

      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch to if the condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,

      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.
      RET_FLAG,

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// GlobalBaseReg - On Darwin, this node represents the result of the
      /// popl at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector. If you think this is too close to the previous
      /// mnemonic, so do I; blame Intel.
      MOVDQ2Q,

      /// MMX_MOVD2W - Copies a 32-bit value from the low word of an MMX
      /// vector to a GPR.
      MMX_MOVD2W,

      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// PINSRB - Insert the lower 8 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// PINSRW - Insert the lower 16 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRW.
      PINSRW, MMX_PINSRW,

      /// PSHUFB - Shuffle 16 8-bit values within a vector.
      PSHUFB,

      /// ANDNP - Bitwise Logical AND NOT of Packed FP values.
      ANDNP,

      /// PSIGN - Copy integer sign.
      PSIGN,

      /// BLENDV - Blend where the selector is a register.
      BLENDV,

      /// BLENDI - Blend where the selector is an immediate.
      BLENDI,

      /// SUBUS - Integer subtraction with unsigned saturation.
      SUBUS,

      /// HADD - Integer horizontal add.
      HADD,

      /// HSUB - Integer horizontal sub.
      HSUB,

      /// FHADD - Floating point horizontal add.
      FHADD,

      /// FHSUB - Floating point horizontal sub.
      FHSUB,

      /// UMAX, UMIN - Unsigned integer max and min.
      UMAX, UMIN,

      /// SMAX, SMIN - Signed integer max and min.
      SMAX, SMIN,

      /// FMAX, FMIN - Floating point max and min.
      FMAX, FMIN,

      /// FMAXC, FMINC - Commutative FMIN and FMAX.
      FMAXC, FMINC,

      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation. Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,
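
      // A sketch of that refinement: one Newton-Raphson step turns an
      // estimate x0 of 1/a into x1 = x0*(2 - a*x0), and an estimate r0 of
      // 1/sqrt(a) into r1 = r0*(1.5 - 0.5*a*r0*r0), roughly doubling the
      // number of correct bits per step.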

      // TLSADDR - Thread Local Storage.
      TLSADDR,

      // TLSBASEADDR - Thread Local Storage. A call to get the start address
      // of the TLS block for the current module.
      TLSBASEADDR,

      // TLSCALL - Thread Local Storage. A call to an OS-provided thunk at
      // the address from an earlier relocation.
      TLSCALL,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
      EH_SJLJ_SETJMP,

      // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
      EH_SJLJ_LONGJMP,

      /// TC_RETURN - Tail call return. See X86TargetLowering::LowerCall for
      /// the list of operands.
      TC_RETURN,

      // VZEXT_MOVL - Vector move low and zero extend.
      VZEXT_MOVL,

      // VSEXT_MOVL - Vector move low and sign extend.
      VSEXT_MOVL,

      // VZEXT - Vector integer zero-extend.
      VZEXT,

      // VSEXT - Vector integer sign-extend.
      VSEXT,

      // VFPEXT - Vector FP extend.
      VFPEXT,

      // VFPROUND - Vector FP round.
      VFPROUND,

      // VSHLDQ, VSRLDQ - 128-bit vector logical left / right shift.
      VSHLDQ, VSRLDQ,

      // VSHL, VSRL, VSRA - Vector shift elements.
      VSHL, VSRL, VSRA,

      // VSHLI, VSRLI, VSRAI - Vector shift elements by immediate.
      VSHLI, VSRLI, VSRAI,

      // CMPP - Vector packed double/float comparison.
      CMPP,

      // PCMP* - Vector integer comparisons.
      PCMPEQ, PCMPGT,

      // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL,
      INC, DEC, OR, XOR, AND,

      BLSI,   // BLSI - Extract lowest set isolated bit
      BLSMSK, // BLSMSK - Get mask up to lowest set bit
      BLSR,   // BLSR - Reset lowest set bit

      UMUL, // LOW, HI, FLAGS = umul LHS, RHS

      // MUL_IMM - X86 specific multiply by immediate.
      MUL_IMM,

      // PTEST - Vector bitwise comparisons.
      PTEST,

      // TESTP - Vector packed fp sign bitwise comparisons.
      TESTP,

      // OR/AND test for masks.
      KORTEST,
      KTEST,

      // Several flavors of instructions with vector shuffle behaviors.
      PALIGNR,
      PSHUFD,
      PSHUFHW,
      PSHUFLW,
      SHUFP,
      MOVDDUP,
      MOVSHDUP,
      MOVSLDUP,
      MOVLHPS,
      MOVLHPD,
      MOVHLPS,
      MOVLPS,
      MOVLPD,
      MOVSD,
      MOVSS,
      UNPCKL,
      UNPCKH,
      VPERMILP,
      VPERMV,
      VPERMI,
      VPERM2X128,
      VBROADCAST,
      // masked broadcast
      VBROADCASTM,

      // PMULUDQ - Vector multiply packed unsigned doubleword integers.
      PMULUDQ,

      // FMA nodes.
      FMADD,
      FNMADD,
      FMSUB,
      FNMSUB,
      FMADDSUB,
      FMSUBADD,

      // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
      // according to %al. An operator is needed so that this can be expanded
      // with control flow.
      VASTART_SAVE_XMM_REGS,

      // WIN_ALLOCA - Windows' _chkstk call to do stack probing.
      WIN_ALLOCA,

      // SEG_ALLOCA - For allocating variable amounts of stack space when using
      // segmented stacks. Checks if the current stacklet has enough space, and
      // falls back to heap allocation if not.
      SEG_ALLOCA,

      // WIN_FTOL - Windows' _ftol2 runtime routine to do fptoui.
      WIN_FTOL,

      // Memory barriers.
      MEMBARRIER,
      MFENCE,
      SFENCE,
      LFENCE,

      // FNSTSW16r - Store FP status word into i16 register.
      FNSTSW16r,

      // SAHF - Store contents of %ah into %eflags.
      SAHF,

      // RDRAND - Get a random integer and indicate whether it is valid in CF.
      RDRAND,

      // RDSEED - Get a NIST SP800-90B & C compliant random integer and
      // indicate whether it is valid in CF.
      RDSEED,
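
      // The usual consumption pattern for that CF result is a retry loop,
      // for example:
      //   loop: rdrand %eax
      //         jnc   loop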

      // PCMP*STRI
      PCMPISTRI,
      PCMPESTRI,

      // XTEST - Test if in transactional execution.
      XTEST,

      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMXOR64_DAG,
      // ATOMAND64_DAG, ATOMNAND64_DAG, ATOMMAX64_DAG, ATOMMIN64_DAG,
      // ATOMUMAX64_DAG, ATOMUMIN64_DAG, ATOMSWAP64_DAG -
      // Atomic 64-bit binary operations.
      ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMXOR64_DAG,
      ATOMAND64_DAG,
      ATOMNAND64_DAG,
      ATOMMAX64_DAG,
      ATOMMIN64_DAG,
      ATOMUMAX64_DAG,
      ATOMUMIN64_DAG,
      ATOMSWAP64_DAG,

      // LCMPXCHG_DAG, LCMPXCHG8_DAG, LCMPXCHG16_DAG - Compare and swap.
      LCMPXCHG_DAG,
      LCMPXCHG8_DAG,
      LCMPXCHG16_DAG,

      // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,

      // FNSTCW16m - Store FP control word into i16 memory.
      FNSTCW16m,

      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the rounding mode changes they
      /// require. It has two inputs (token chain and address) and two outputs
      /// (int value and token chain).
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,

      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain).
      /// FILD_FLAG also produces a flag.
      FILD,
      FILD_FLAG,

      /// FLD - This instruction implements an extending load to FP stack
      /// slots. This corresponds to X86::FLD32m / X86::FLD64m. It takes a
      /// chain operand, a ptr to load from, and a ValueType node indicating
      /// the type to load to.
      FLD,

      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to X86::FST32m / X86::FST64m. It takes a
      /// chain operand, a value to store, an address, and a ValueType to
      /// store it as.
      FST,

      /// VAARG_64 - This instruction grabs the address of the next argument
      /// from a va_list. It reads and modifies the va_list in memory.
      VAARG_64

      // WARNING: Do not add anything at the end unless you want the node to
      // have a memop! In fact, starting from ATOMADD64_DAG all opcodes will
      // be treated as target memory ops!
    };
  }

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isVEXTRACT128Index - Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF128, VEXTRACTI128 instructions.
    bool isVEXTRACT128Index(SDNode *N);

    /// isVINSERT128Index - Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF128, VINSERTI128 instructions.
    bool isVINSERT128Index(SDNode *N);

    /// isVEXTRACT256Index - Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF64X4, VEXTRACTI64X4 instructions.
    bool isVEXTRACT256Index(SDNode *N);

    /// isVINSERT256Index - Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF64X4, VINSERTI64X4 instructions.
    bool isVINSERT256Index(SDNode *N);
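
    /// As a worked example of the immediates computed below: extracting
    /// elements 4-7 of a v8f32 with EXTRACT_SUBVECTOR at index 4 selects
    /// 128-bit lane 1, so getExtractVEXTRACT128Immediate would return 1
    /// for VEXTRACTF128.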

    /// getExtractVEXTRACT128Immediate - Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF128, VEXTRACTI128 instructions.
    unsigned getExtractVEXTRACT128Immediate(SDNode *N);

    /// getInsertVINSERT128Immediate - Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF128, VINSERTI128 instructions.
    unsigned getInsertVINSERT128Immediate(SDNode *N);

    /// getExtractVEXTRACT256Immediate - Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF64X4, VEXTRACTI64X4 instructions.
    unsigned getExtractVEXTRACT256Immediate(SDNode *N);

    /// getInsertVINSERT256Immediate - Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF64X4, VINSERTI64X4 instructions.
    unsigned getInsertVINSERT256Immediate(SDNode *N);

    /// isZeroNode - Returns true if Elt is a constant zero or a floating point
    /// constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// isOffsetSuitableForCodeModel - Returns true if the given offset fits
    /// into the displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);

    /// isCalleePop - Determines whether the callee is required to pop its
    /// own arguments. Callee pop is necessary to support tail calls.
    bool isCalleePop(CallingConv::ID CallingConv,
                     bool is64Bit, bool IsVarArg, bool TailCallOpt);
  }
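
  // For reference, the callee-pop convention probed by isCalleePop: an x86
  // stdcall callee removes its own argument bytes on return ("ret $8" for
  // 8 bytes of arguments), whereas under the default cdecl convention the
  // caller cleans the stack.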

  //===--------------------------------------------------------------------===//
  //  X86TargetLowering - X86 Implementation of the TargetLowering interface
  class X86TargetLowering : public TargetLowering {
  public:
    explicit X86TargetLowering(X86TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i8; }

    virtual const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const;

    /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
    /// jumptable.
    virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                             SelectionDAG &DAG) const;
    virtual const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const;

    /// getByValTypeAlignment - Return the desired alignment for ByVal
    /// aggregate function arguments in the caller parameter area. For X86,
    /// aggregates that contain SSE vectors are placed at 16-byte boundaries
    /// while the rest are at 4-byte boundaries.
    virtual unsigned getByValTypeAlignment(Type *Ty) const;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination alignment can satisfy
    /// any constraint. Similarly, if SrcAlign is zero there is no need to
    /// check it against the alignment requirement, probably because the
    /// source does not need to be loaded. If 'IsMemset' is true, that means
    /// it's expanding a memset. If 'ZeroMemset' is true, that means it's a
    /// memset of zero. 'MemcpyStrSrc' indicates whether the memcpy source is
    /// constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                        MachineFunction &MF) const;

    /// isSafeMemOpType - Returns true if it's safe to use load / store of the
    /// specified type to expand memcpy / memset inline. This is mostly true
    /// for all types except for some special cases. For example, on X86
    /// targets without SSE2, f64 load / store are done with fldl / fstpl,
    /// which also perform a type conversion. Note the specified type doesn't
    /// have to be legal as the hook is used before type legalization.
    virtual bool isSafeMemOpType(MVT VT) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    /// isTypeDesirableForOp - Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;

    /// IsDesirableToPromoteOp - Return true if the target has native support
    /// for the specified value type and it is 'desirable' to use the type.
    /// e.g. On x86 i16 is legal, but undesirable since i16 instruction
    /// encodings are longer and some i16 instructions are slow.
    virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    /// getTargetNodeName - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one and return them in the
    /// KnownZero/KnownOne bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;
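
    // One concrete case this hook can handle: an X86ISD::SETCC node
    // materializes 0 or 1 in an i8, so bits 7:1 of its result can be
    // reported as known zero, letting later zero-extension masks fold away.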

    // ComputeNumSignBitsForTargetNode - Determine the number of bits in the
    // operation that are sign bits.
    virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                     unsigned Depth) const;

    virtual bool
    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight
    /// value. The operand object must already have been set up with the
    /// operand type.
    virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    virtual const char *LowerXConstraint(EVT ConstraintVT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   MVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is the target has add instructions which can
    /// add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;

    virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;

    /// isZExtFree - Return true if any actual instruction that defines a
    /// value of type Ty1 implicitly zero-extends the value to Ty2 in the
    /// result register. This does not necessarily include registers defined
    /// in unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicitly zero-extend the
    /// result out to 64 bits.
    virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
    virtual bool isZExtFree(EVT VT1, EVT VT2) const;
    virtual bool isZExtFree(SDValue Val, EVT VT2) const;
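
    // For instance, on x86-64 "movl %edi, %eax" writes the low 32 bits and
    // implicitly clears bits 63:32 of %rax, which is what makes the
    // i32 -> i64 zero-extension described above free.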

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true, otherwise fmuladd is
    /// expanded to fmul + fadd.
    virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;

    /// isNarrowingProfitable - Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                                    EVT VT) const;

    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can
    /// use this to indicate whether there is a suitable VECTOR_SHUFFLE that
    /// can be used to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                        EVT VT) const;

    /// ShouldShrinkFPConstant - If true, then instruction selection should
    /// seek to shrink the FP constant of the specified type to a smaller type
    /// in order to save space and / or reduce runtime.
    virtual bool ShouldShrinkFPConstant(EVT VT) const {
      // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
      // expensive than a straight movsd. On the other hand, it's important to
      // shrink long double fp constants since fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }

    const X86Subtarget* getSubtarget() const {
      return Subtarget;
    }

    /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
    /// computed in an SSE register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(EVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 when SSE2 is available
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 when SSE1 is available
    }

    /// isTargetFTOL - Return true if the target uses the MSVC _ftol2 routine
    /// for fptoui.
    bool isTargetFTOL() const {
      return Subtarget->isTargetWindows() && !Subtarget->is64Bit();
    }

    /// isIntegerTypeFTOL - Return true if the MSVC _ftol2 routine should be
    /// used for fptoui to the given type.
    bool isIntegerTypeFTOL(EVT VT) const {
      return isTargetFTOL() && VT == MVT::i64;
    }
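
    // For background on the two hooks above: _ftol2 is an MSVC runtime
    // helper that converts the x87 value in ST(0) to a 64-bit integer
    // returned in EDX:EAX, so it is only relevant for fptoui on 32-bit
    // Windows targets.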

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                                     const TargetLibraryInfo *libInfo) const;

    /// getStackCookieLocation - Return true if the target stores stack
    /// protector cookies at a fixed offset in some non-standard address
    /// space, and populates the address space and offset as
    /// appropriate.
    virtual bool getStackCookieLocation(unsigned &AddressSpace,
                                        unsigned &Offset) const;

    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                      SelectionDAG &DAG) const;

    /// \brief Reset the operation actions based on target options.
    virtual void resetOperationActions();

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(MVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;
    const DataLayout *TD;

    /// Used to store the TargetOptions so that we don't waste time resetting
    /// the operation actions unless we have to.
    TargetOptions TO;

    /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE and x87
    /// floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;

    /// LegalFPImmediates - A list of legal fp immediates.
    std::vector<APFloat> LegalFPImmediates;

    /// addLegalFPImmediate - Indicate that this x86 target can instruction
    /// select the specified FP immediate natively.
    void addLegalFPImmediate(const APFloat& Imm) {
      LegalFPImmediates.push_back(Imm);
    }
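
    // A usage sketch, mirroring what the constructor would be expected to
    // do: x87 can materialize +0.0 and +1.0 directly with fldz/fld1, so
    // those constants would be registered via
    // addLegalFPImmediate(APFloat(+0.0)) and
    // addLegalFPImmediate(APFloat(+1.0)).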

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerMemArgument(SDValue Chain,
                             CallingConv::ID CallConv,
                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo *MFI,
                             unsigned i) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;

    // Call lowering helpers.

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                           Type *RetTy,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall,
                                    bool Is64Bit, int FPDiff, SDLoc dl) const;

    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;

    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool isSigned,
                                               bool isReplace) const;

    SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, SDLoc dl,
                                   SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerZERO_EXTEND_AVX512(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerANY_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
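
    /// LowerToBT - Try to lower a single-bit test such as
    /// (and X, (1 << N)) != 0 into an X86ISD::BT node with the given
    /// condition code; see the implementation for the exact patterns handled.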
    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
                      SDLoc dl, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

    // Utility functions to help LowerVECTOR_SHUFFLE & LowerBUILD_VECTOR.
    SDValue LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const;
    SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const;
    SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerVectorAllZeroTest(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           SDLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;
    virtual SDValue
      LowerCall(CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  SDLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    virtual MVT
    getTypeForExtArgOrReturn(MVT VT, ISD::NodeType ExtendKind) const;

    virtual bool
    CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                   bool isVarArg,
                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                   LLVMContext &Context) const;

    /// Utility function to emit atomic-load-arith operations (and, or, xor,
    /// nand, max, min, umax, umin). It takes the corresponding instruction to
    /// expand and the associated machine basic block.
    MachineBasicBlock *EmitAtomicLoadArith(MachineInstr *MI,
                                           MachineBasicBlock *MBB) const;

    /// Utility function to emit atomic-load-arith operations (and, or, xor,
    /// nand, add, sub, swap) for 64-bit operands on 32-bit targets.
    MachineBasicBlock *EmitAtomicLoadArith6432(MachineInstr *MI,
                                               MachineBasicBlock *MBB) const;

    // Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *EmitVAARG64WithCustomInserter(
                       MachineInstr *MI,
                       MachineBasicBlock *MBB) const;
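
    // For context, the va_arg expansion above needs custom control flow
    // because the SysV AMD64 va_list tracks gp_offset and fp_offset into a
    // register save area and falls back to the overflow area once the
    // register slots are exhausted.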

    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB,
                                            bool Is64Bit) const;

    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI,
                                        MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
                                         MachineBasicBlock *MBB) const;

    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                    SelectionDAG &DAG) const;

    /// Convert a comparison if required by the subtarget.
    SDValue ConvertCmpIfNecessary(SDValue Cmp, SelectionDAG &DAG) const;
  };

  namespace X86 {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
}

#endif    // X86ISELLOWERING_H