//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <stdint.h>
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// This corresponds to X86AddressMode, but uses SDValues instead of register
  /// numbers for the leaves of the matched tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    MCSymbol *MCSym;
    int JT;
    unsigned Align;             // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
        : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
          Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
          MCSym(nullptr), JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {}

    bool hasSymbolicDisplacement() const {
      return GV != nullptr || CP != nullptr || ES != nullptr ||
             MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
    }

    bool hasBaseOrIndexReg() const {
      return BaseType == FrameIndexBase ||
             IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
    }

    /// Return true if this addressing mode is already RIP-relative.
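    /// (A RIP-relative mode admits only an additional signed 32-bit
    /// displacement; no index register can be used alongside %rip.)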
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump(SelectionDAG *DAG = nullptr) {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode())
        Base_Reg.getNode()->dump(DAG);
      else
        dbgs() << "nul\n";
      if (BaseType == FrameIndexBase)
        dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
      dbgs() << " Scale " << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode())
        IndexReg.getNode()->dump(DAG);
      else
        dbgs() << "nul\n";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " MCSym ";
      if (MCSym)
        dbgs() << MCSym;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
#endif
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86-specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel final : public SelectionDAGISel {
    /// Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// If true, selector should try to optimize for code size instead of
    /// performance.
    bool OptForSize;

    /// If true, selector should try to optimize for minimum code size.
    bool OptForMinSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
        : SelectionDAGISel(tm, OptLevel), OptForSize(false),
          OptForMinSize(false) {}

    StringRef getPassName() const override {
      return "X86 DAG->DAG Instruction Selection";
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Reset the subtarget each time through.
      Subtarget = &MF.getSubtarget<X86Subtarget>();
      SelectionDAGISel::runOnMachineFunction(MF);
      return true;
    }

    void EmitFunctionEntryCode() override;

    bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

    void PreprocessISelDAG() override;
    void PostprocessISelDAG() override;

    // Include the pieces autogenerated from the target description.
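    // (TableGen emits the matcher tables and the SelectCode() entry point
    // that Select() falls back on for pattern-based selection.)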
#include "X86GenDAGISel.inc"

  private:
    void Select(SDNode *N) override;

    bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool matchAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchAdd(SDValue N, X86ISelAddressMode &AM, unsigned Depth);
    bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
                          SDValue &Scale, SDValue &Index, SDValue &Disp,
                          SDValue &Segment);
    bool selectMOV64Imm32(SDValue N, SDValue &Imm);
    bool selectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool selectLEA64_32Addr(SDValue N, SDValue &Base,
                            SDValue &Scale, SDValue &Index, SDValue &Disp,
                            SDValue &Segment);
    bool selectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool selectScalarSSELoad(SDNode *Root, SDNode *Parent, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);
    bool selectRelocImm(SDValue N, SDValue &Op);

    bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    // Convenience method where P is also root.
    bool tryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment) {
      return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
    }

    // Try to fold a vector load. This makes sure the load isn't non-temporal.
    bool tryFoldVecLoad(SDNode *Root, SDNode *P, SDValue N,
                        SDValue &Base, SDValue &Scale,
                        SDValue &Index, SDValue &Disp,
                        SDValue &Segment);

    /// Implement addressing mode selection for inline asm expressions.
    bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                      unsigned ConstraintID,
                                      std::vector<SDValue> &OutOps) override;

    void emitSpecialCodeForMain();

    inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
                                   SDValue &Base, SDValue &Scale,
                                   SDValue &Index, SDValue &Disp,
                                   SDValue &Segment) {
      Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
                 ? CurDAG->getTargetFrameIndex(
                       AM.Base_FrameIndex,
                       TLI->getPointerTy(CurDAG->getDataLayout()))
                 : AM.Base_Reg;
      Scale = getI8Imm(AM.Scale, DL);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP-relative offset
      // is 32-bit.
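      // (The hardware sign-extends the 32-bit displacement to 64 bits when
      // computing the effective address.)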
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.MCSym) {
        assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
        assert(AM.SymbolFlags == 0 && "Unexpected symbol flags with MCSym.");
        Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    // Utility function to determine whether we should avoid selecting
    // immediate forms of instructions for better code size.
    // At a high level, we'd like to avoid such instructions when
    // we have similar constants used within the same basic block
    // that can be kept in a register.
    //
    bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
      uint32_t UseCount = 0;

      // We don't want to hoist if we're not optimizing for size.
      // TODO: We'd like to remove this restriction.
      // See the comment in X86InstrInfo.td for more info.
      if (!OptForSize)
        return false;

      // Walk all the users of the immediate.
      for (SDNode::use_iterator UI = N->use_begin(),
           UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {

        SDNode *User = *UI;

        // This user is already selected. Count it as a legitimate use and
        // move on.
        if (User->isMachineOpcode()) {
          UseCount++;
          continue;
        }

        // We want to count stores of immediates as real uses.
        if (User->getOpcode() == ISD::STORE &&
            User->getOperand(1).getNode() == N) {
          UseCount++;
          continue;
        }

        // We don't currently match users that have > 2 operands (except
        // for stores, which are handled above).
        // Those instructions won't match in ISEL, for now, and would
        // be counted incorrectly.
        // This may change in the future as we add additional instruction
        // types.
        if (User->getNumOperands() != 2)
          continue;

        // Immediates that are used for offsets as part of stack
        // manipulation should be left alone. These are typically
        // used to indicate SP offsets for argument passing and
        // will get pulled into stores/pushes (implicitly).
        if (User->getOpcode() == X86ISD::ADD ||
            User->getOpcode() == ISD::ADD    ||
            User->getOpcode() == X86ISD::SUB ||
            User->getOpcode() == ISD::SUB) {

          // Find the other operand of the add/sub.
          SDValue OtherOp = User->getOperand(0);
          if (OtherOp.getNode() == N)
            OtherOp = User->getOperand(1);

          // Don't count if the other operand is SP.
          RegisterSDNode *RegNode;
          if (OtherOp->getOpcode() == ISD::CopyFromReg &&
              (RegNode = dyn_cast_or_null<RegisterSDNode>(
                   OtherOp->getOperand(1).getNode())))
            if ((RegNode->getReg() == X86::ESP) ||
                (RegNode->getReg() == X86::RSP))
              continue;
        }

        // ... otherwise, count this and move on.
        UseCount++;
      }

      // If we have more than one use, then recommend hoisting.
      return (UseCount > 1);
    }

    /// Return a target constant with the specified value, of type i8.
    inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
    }

    /// Return a target constant with the specified value, of type i32.
    inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
    }

    /// Return a target constant with the specified value, of type i64.
    inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
    }

    SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
                                        const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(1);
      MVT VecVT = N->getOperand(0).getSimpleValueType();
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }

    SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
                                      const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(2);
      MVT VecVT = N->getSimpleValueType(0);
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }

    /// Return an SDNode that returns the value of the global base register.
    /// Output instructions required to initialize the global base register,
    /// if necessary.
    SDNode *getGlobalBaseReg();

    /// Return a reference to the TargetMachine, casted to the target-specific
    /// type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// Return a reference to the TargetInstrInfo, casted to the
    /// target-specific type.
    const X86InstrInfo *getInstrInfo() const {
      return Subtarget->getInstrInfo();
    }

    /// Address-mode matching performs shift-of-and to and-of-shift
    /// reassociation in order to expose more scaled addressing
    /// opportunities.
    bool ComplexPatternFuncMutatesDAG() const override {
      return true;
    }

    bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;

    /// Returns whether this is a relocatable immediate in the range
    /// [-2^Width .. 2^Width-1].
    template <unsigned Width> bool isSExtRelocImm(SDNode *N) const {
      if (auto *CN = dyn_cast<ConstantSDNode>(N))
        return isInt<Width>(CN->getSExtValue());
      return isSExtAbsoluteSymbolRef(Width, N);
    }

    // Indicates we should prefer to use a non-temporal load for this load.
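    // Non-temporal loads (MOVNTDQA and its wider forms) exist only as
    // full-width, aligned vector instructions, hence the alignment and
    // feature checks below.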
    bool useNonTemporalLoad(LoadSDNode *N) const {
      if (!N->isNonTemporal())
        return false;

      unsigned StoreSize = N->getMemoryVT().getStoreSize();

      if (N->getAlignment() < StoreSize)
        return false;

      switch (StoreSize) {
      default: llvm_unreachable("Unsupported store size");
      case 16:
        return Subtarget->hasSSE41();
      case 32:
        return Subtarget->hasAVX2();
      case 64:
        return Subtarget->hasAVX512();
      }
    }

    bool foldLoadStoreIntoMemOperand(SDNode *Node);
    bool matchBEXTRFromAnd(SDNode *Node);
    bool shrinkAndImmediate(SDNode *N);
    bool isMaskZeroExtended(SDNode *N) const;

    MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node);
    MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node,
                                SDValue &InFlag);
  };
}


// Returns true if this masked compare can be implemented legally with this
// type.
static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == X86ISD::CMPM || Opcode == ISD::SETCC ||
      Opcode == X86ISD::CMPM_RND || Opcode == X86ISD::VFPCLASS) {
    // We can get 256-bit 8 element types here without VLX being enabled. When
    // this happens we will use 512-bit operations and the mask will not be
    // zero extended.
    EVT OpVT = N->getOperand(0).getValueType();
    if (OpVT.is256BitVector() || OpVT.is128BitVector())
      return Subtarget->hasVLX();

    return true;
  }
  // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
  if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
      Opcode == X86ISD::FSETCCM_RND)
    return true;

  return false;
}

// Returns true if we can assume the writer of the mask has zero extended it
// for us.
bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
  // If this is an AND, check if we have a compare on either side. As long as
  // one side guarantees the mask is zero extended, the AND will preserve those
  // zeros.
  if (N->getOpcode() == ISD::AND)
    return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
           isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);

  return isLegalMaskCompare(N, Subtarget);
}

bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDCARRY:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      //   movl 4(%esp), %eax
      //   addl $4, %eax
      // vs.
      //   movl $4, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

        // If this is a 64-bit AND with an immediate that fits in 32-bits,
        // prefer using the smaller and over folding the load. This is needed
        // to make sure immediates created by shrinkAndImmediate are always
        // folded. Ideally we would narrow the load during DAG combine and get
        // the best of both worlds.
        if (U->getOpcode() == ISD::AND &&
            Imm->getAPIntValue().getBitWidth() == 64 &&
            Imm->getAPIntValue().isIntN(32))
          return false;
      }

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      //   movl %gs:0, %eax
      //   leal i@NTPOFF(%eax), %eax
      // instead of
      //   movl $i@NTPOFF, %eax
      //   addl %gs:0, %eax
      // If the block also has an access to a second TLS address, this will
      // save a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }

      // Don't fold load if this matches the BTS/BTR/BTC patterns.
      // BTS: (or X, (shl 1, n))
      // BTR: (and X, (rotl -2, n))
      // BTC: (xor X, (shl 1, n))
      if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
        if (U->getOperand(0).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(0).getOperand(0)))
          return false;

        if (U->getOperand(1).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(1).getOperand(0)))
          return false;
      }
      if (U->getOpcode() == ISD::AND) {
        SDValue U0 = U->getOperand(0);
        SDValue U1 = U->getOperand(1);
        if (U0.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }

        if (U1.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }
      }

      break;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      // Don't fold a load into a shift by immediate. The BMI2 instructions
      // support folding a load, but not an immediate. The legacy instructions
      // support folding an immediate, but can't fold a load. Folding an
      // immediate is preferable to folding a load.
      if (isa<ConstantSDNode>(U->getOperand(1)))
        return false;

      break;
    }
  }

  // Prevent folding a load if this can be implemented with an insert_subreg or
  // a move that implicitly zeroes.
  if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
      isNullConstant(Root->getOperand(2)) &&
      (Root->getOperand(0).isUndef() ||
       ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
    return false;

  return true;
}

/// Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
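/// This puts the load directly in front of the call so that it can be folded
/// into the call's address operand.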
static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
        CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  Ops.append(Call->op_begin() + 1, Call->op_end());
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}

/// Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After moveBelowOrigChain the load is moved between the call and
  // the chain; this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a
  // store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptFor[Min]Size are used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction().optForSize();
  OptForMinSize = MF->getFunction().optForMinSize();
  assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize");

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // If this is a target specific AND node with no flag usages, turn it back
    // into ISD::AND to enable test instruction matching.
    if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
      SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      CurDAG->DeleteNode(N);
      continue;
    }

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target can fold the load into the call or
        // jmp.
        !Subtarget->useRetpoline() &&
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          (Subtarget->is64Bit() ||
           !getTargetMachine().isPositionIndependent())))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to be a store
    // and a load to the stack. This is a gross hack. We would like to simply
    // mark these as being illegal, but when we do that, legalize produces
    // these when it expands calls, then expands these in the same legalize
    // pass. We would like dag combine to be able to hack on these between the
    // call expansion and the node legalization. As such this pass basically
    // does "really late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    MVT SrcVT = N->getOperand(0).getSimpleValueType();
    MVT DstVT = N->getSimpleValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    const X86TargetLowering *X86Lowering =
        static_cast<const X86TargetLowering *>(TLI);
    bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    SDLoc dl(N);

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store =
        CurDAG->getTruncStore(CurDAG->getEntryNode(), dl, N->getOperand(0),
                              MemTmp, MachinePointerInfo(), MemVT);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(), MemVT);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the DAG because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}


void X86DAGToDAGISel::PostprocessISelDAG() {
  // Skip peepholes at -O0.
  if (TM.getOptLevel() == CodeGenOpt::None)
    return;

  // Attempt to remove vector moves that were inserted to zero upper bits.

  SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
  ++Position;

  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    if (N->getMachineOpcode() != TargetOpcode::SUBREG_TO_REG)
      continue;

    unsigned SubRegIdx = N->getConstantOperandVal(2);
    if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
      continue;

    SDValue Move = N->getOperand(1);
    if (!Move.isMachineOpcode())
      continue;

    // Make sure it's one of the move opcodes we recognize.
    switch (Move.getMachineOpcode()) {
    default:
      continue;
    case X86::VMOVAPDrr:       case X86::VMOVUPDrr:
    case X86::VMOVAPSrr:       case X86::VMOVUPSrr:
    case X86::VMOVDQArr:       case X86::VMOVDQUrr:
    case X86::VMOVAPDYrr:      case X86::VMOVUPDYrr:
    case X86::VMOVAPSYrr:      case X86::VMOVUPSYrr:
    case X86::VMOVDQAYrr:      case X86::VMOVDQUYrr:
    case X86::VMOVAPDZ128rr:   case X86::VMOVUPDZ128rr:
    case X86::VMOVAPSZ128rr:   case X86::VMOVUPSZ128rr:
    case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
    case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
    case X86::VMOVAPDZ256rr:   case X86::VMOVUPDZ256rr:
    case X86::VMOVAPSZ256rr:   case X86::VMOVUPSZ256rr:
    case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
    case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
      break;
    }

    SDValue In = Move.getOperand(0);
    if (!In.isMachineOpcode() ||
        In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
      continue;

    // The producing instruction is another vector instruction, so we can drop
    // the move.
    CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));

    // If the move is now dead, delete it.
    if (Move.getNode()->use_empty())
      CurDAG->RemoveDeadNode(Move.getNode());
  }
}


/// Emit any code that needs to be executed only in the main function.
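/// On Cygwin and MinGW targets this is a call to __main, which the C runtime
/// uses to run static constructors before the body of main executes.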
void X86DAGToDAGISel::emitSpecialCodeForMain() {
  if (Subtarget->isTargetCygMing()) {
    TargetLowering::ArgListTy Args;
    auto &DL = CurDAG->getDataLayout();

    TargetLowering::CallLoweringInfo CLI(*CurDAG);
    CLI.setChain(CurDAG->getRoot())
        .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
                   CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
                   std::move(Args));
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    CurDAG->setRoot(Result.second);
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  const Function &F = MF->getFunction();
  if (F.hasExternalLinkage() && F.getName() == "main")
    emitSpecialCodeForMain();
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  // If there's no offset to fold, we don't need to do any work.
  if (Offset == 0)
    return false;

  // Cannot combine ExternalSymbol displacements with integer offsets.
  if (AM.ES || AM.MCSym)
    return true;

  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N,
                                         X86ISelAddressMode &AM) {
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
        (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
         Subtarget->isTargetFuchsia()))
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      // Address space 258 is not handled here, because it is not used to
      // address TLS areas.
      }

  return true;
}

/// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an
/// addressing mode. These wrap things that will resolve down into a symbol
/// reference. If no match is possible, this returns true, otherwise it
/// returns false.
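/// Wrapper nodes are produced by lowering around symbolic operands such as
/// globals, constant-pool entries, jump tables, and block addresses.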
bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;

  // We can't use an addressing mode in the 64-bit large code model. In the
  // medium code model, we can use such a mode when RIP wrappers are present.
  // That signifies access to globals that are known to be "near", such as the
  // GOT itself.
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit() &&
      (M == CodeModel::Large || (M == CodeModel::Medium && !IsRIPRel)))
    return true;

  // Base and index reg must be 0 in order to use %rip as base.
  if (IsRIPRel && AM.hasBaseOrIndexReg())
    return true;

  // Make a local copy in case we can't do this fold.
  X86ISelAddressMode Backup = AM;

  int64_t Offset = 0;
  SDValue N0 = N.getOperand(0);
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
    AM.GV = G->getGlobal();
    AM.SymbolFlags = G->getTargetFlags();
    Offset = G->getOffset();
  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
    AM.CP = CP->getConstVal();
    AM.Align = CP->getAlignment();
    AM.SymbolFlags = CP->getTargetFlags();
    Offset = CP->getOffset();
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
    AM.ES = S->getSymbol();
    AM.SymbolFlags = S->getTargetFlags();
  } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
    AM.MCSym = S->getMCSymbol();
  } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
    AM.JT = J->getIndex();
    AM.SymbolFlags = J->getTargetFlags();
  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
    AM.BlockAddr = BA->getBlockAddress();
    AM.SymbolFlags = BA->getTargetFlags();
    Offset = BA->getOffset();
  } else
    llvm_unreachable("Unhandled symbol reference node.");

  if (foldOffsetIntoAddress(Offset, AM)) {
    AM = Backup;
    return true;
  }

  if (IsRIPRel)
    AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));

  // Commit the changes now that we know this fold is safe.
  return false;
}

/// Add the specified node to the specified addressing mode, returning true if
/// it cannot be done. This just pattern matches for the addressing mode.
bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (matchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has a
  // smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr &&
      AM.IndexReg.getNode() == nullptr &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}

bool X86DAGToDAGISel::matchAdd(SDValue N, X86ISelAddressMode &AM,
                               unsigned Depth) {
  // Add an artificial use to this node so that we can keep track of
  // it if it gets CSE'd with a different node.
  HandleSDNode Handle(N);

  X86ISelAddressMode Backup = AM;
  if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
      !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
    return false;
  AM = Backup;

  // Try again after commuting the operands.
  if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1) &&
      !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
    return false;
  AM = Backup;

  // If we couldn't fold both operands into the address at the same time,
  // see if we can just put each operand into a register and fold at least
  // the add.
  if (AM.BaseType == X86ISelAddressMode::RegBase &&
      !AM.Base_Reg.getNode() &&
      !AM.IndexReg.getNode()) {
    N = Handle.getValue();
    AM.Base_Reg = N.getOperand(0);
    AM.IndexReg = N.getOperand(1);
    AM.Scale = 1;
    return false;
  }
  N = Handle.getValue();
  return true;
}

// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N->getNodeId() == -1 ||
      (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
       SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
    DAG.RepositionNode(Pos->getIterator(), N.getNode());
    // Mark Node as invalid for pruning as after this it may be a successor to
    // a selected node but otherwise be in the same position as Pos.
    // Conservatively mark it with the same -abs(Id) to assure node ID
    // invariant is preserved.
    N->setNodeId(Pos->getNodeId());
    SelectionDAGISel::InvalidateNodeId(N.getNode());
  }
}

// Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
// safe. This allows us to convert the shift and the AND into an h-register
// extract and a scaled index. Returns false if the simplification is
// performed.
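// For example, with C1 == 2, "(X >> 6) & 0x3fc" becomes
// "((X >> 8) & 0xff) << 2", and the trailing shl is absorbed into the
// addressing mode as Scale == 4.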
static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, DL, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, Eight);
  insertDAGNode(DAG, N, Srl);
  insertDAGNode(DAG, N, NewMask);
  insertDAGNode(DAG, N, And);
  insertDAGNode(DAG, N, ShlCount);
  insertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}

// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides, the
  // isel mechanism requires their node IDs to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, NewMask);
  insertDAGNode(DAG, N, NewAnd);
  insertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %ecx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = countLeadingZeros(Mask);
  unsigned MaskTZ = countTrailingZeros(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that the mask is a contiguous run of bits.
  if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
  if (MaskLZ < ScaleDown)
    return true;
  MaskLZ -= ScaleDown;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
                          X.getOperand(0).getSimpleValueType().getSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits =
      APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
  KnownBits Known;
  DAG.computeKnownBits(X, Known);
  if (MaskedHighBits != Known.Zero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  MVT VT = N.getSimpleValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
    insertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  SDLoc DL(N);
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  insertDAGNode(DAG, N, NewSRLAmt);
  insertDAGNode(DAG, N, NewSRL);
  insertDAGNode(DAG, N, NewSHLAmt);
  insertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}

bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  SDLoc dl(N);
  LLVM_DEBUG({
    dbgs() << "MatchAddress: ";
    AM.dump(CurDAG);
  });
  // Limit recursion.
  if (Depth > 5)
    return matchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol addresses currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!(AM.ES || AM.MCSym) && AM.JT != -1)
      return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::LOCAL_RECOVER: {
    if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
      if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
        // Use the symbol and don't prefix it.
        AM.MCSym = ESNode->getMCSymbol();
        return false;
      }
    break;
  }
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!foldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!matchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
      break;

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in matchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getOperand(0);
          ConstantSDNode *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!foldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
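    // (Result 0 is the low half of the product; the high half in result 1
    // has no addressing-mode equivalent.)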
    if (N.getResNo() != 0) break;
    LLVM_FALLTHROUGH;
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr) {
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getOperand(1))) {
            Reg = MulVal.getOperand(0);
            ConstantSDNode *AddVal =
                cast<ConstantSDNode>(MulVal.getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (foldOffsetIntoAddress(Disp, AM))
              Reg = N.getOperand(0);
          } else {
            Reg = N.getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address, with the
    // index field unused, use -B as the index. This is a win if A has
    // multiple parts that can be folded into the address. Also, this saves
    // a mov if the base register has other uses, since it avoids a
    // two-address sub instruction; however, it costs an additional mov if
    // the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    // FIXME: Don't rely on DELETED_NODEs.
    if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
         AM.Base_Reg->getOpcode() != ISD::DELETED_NODE &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
            ((AM.Disp != 0) && (Backup.Disp == 0)) +
            (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
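    // (Cost starts at zero, so only a strictly negative value represents an
    // expected net win.)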
1545 if (Cost >= 0) { 1546 AM = Backup; 1547 break; 1548 } 1549 1550 // Ok, the transformation is legal and appears profitable. Go for it. 1551 SDValue Zero = CurDAG->getConstant(0, dl, N.getValueType()); 1552 SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS); 1553 AM.IndexReg = Neg; 1554 AM.Scale = 1; 1555 1556 // Insert the new nodes into the topological ordering. 1557 insertDAGNode(*CurDAG, Handle.getValue(), Zero); 1558 insertDAGNode(*CurDAG, Handle.getValue(), Neg); 1559 return false; 1560 } 1561 1562 case ISD::ADD: 1563 if (!matchAdd(N, AM, Depth)) 1564 return false; 1565 break; 1566 1567 case ISD::OR: 1568 // We want to look through a transform in InstCombine and DAGCombiner that 1569 // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'. 1570 // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3)) 1571 // An 'lea' can then be used to match the shift (multiply) and add: 1572 // and $1, %esi 1573 // lea (%rsi, %rdi, 8), %rax 1574 if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) && 1575 !matchAdd(N, AM, Depth)) 1576 return false; 1577 break; 1578 1579 case ISD::AND: { 1580 // Perform some heroic transforms on an and of a constant-count shift 1581 // with a constant to enable use of the scaled offset field. 1582 1583 // Scale must not be used already. 1584 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break; 1585 1586 SDValue Shift = N.getOperand(0); 1587 if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break; 1588 SDValue X = Shift.getOperand(0); 1589 1590 // We only handle up to 64-bit values here as those are what matter for 1591 // addressing mode optimizations. 1592 if (X.getSimpleValueType().getSizeInBits() > 64) break; 1593 1594 if (!isa<ConstantSDNode>(N.getOperand(1))) 1595 break; 1596 uint64_t Mask = N.getConstantOperandVal(1); 1597 1598 // Try to fold the mask and shift into an extract and scale. 1599 if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM)) 1600 return false; 1601 1602 // Try to fold the mask and shift directly into the scale. 1603 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM)) 1604 return false; 1605 1606 // Try to swap the mask and shift to place shifts which can be done as 1607 // a scale on the outside of the mask. 1608 if (!foldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM)) 1609 return false; 1610 break; 1611 } 1612 } 1613 1614 return matchAddressBase(N, AM); 1615 } 1616 1617 /// Helper for MatchAddress. Add the specified node to the 1618 /// specified addressing mode without any further recursion. 1619 bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) { 1620 // Is the base register already occupied? 1621 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) { 1622 // If so, check to see if the scale index register is set. 1623 if (!AM.IndexReg.getNode()) { 1624 AM.IndexReg = N; 1625 AM.Scale = 1; 1626 return false; 1627 } 1628 1629 // Otherwise, we cannot select it. 1630 return true; 1631 } 1632 1633 // Default, generate it as a register. 1634 AM.BaseType = X86ISelAddressMode::RegBase; 1635 AM.Base_Reg = N; 1636 return false; 1637 } 1638 1639 /// Helper for selectVectorAddr. Handles things that can be folded into a 1640 /// gather scatter address. The index register and scale should have already 1641 /// been handled. 1642 bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) { 1643 // TODO: Support other operations. 
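  // Unlike the scalar path there is no recursion here: anything other than a
  // constant offset or a wrapped global falls through to matchAddressBase
  // and, since the index register is already taken by the gather/scatter
  // node, must fit in the base register.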
  switch (N.getOpcode()) {
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!foldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }
  case X86ISD::Wrapper:
    if (!matchWrapper(N, AM))
      return false;
    break;
  }

  return matchAddressBase(N, AM);
}

bool X86DAGToDAGISel::selectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                       SDValue &Scale, SDValue &Index,
                                       SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;
  auto *Mgs = cast<X86MaskedGatherScatterSDNode>(Parent);
  AM.IndexReg = Mgs->getIndex();
  AM.Scale = cast<ConstantSDNode>(Mgs->getScale())->getZExtValue();

  unsigned AddrSpace = cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
  // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
  if (AddrSpace == 256)
    AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
  if (AddrSpace == 257)
    AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  if (AddrSpace == 258)
    AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);

  // Try to match into the base and displacement fields.
  if (matchVectorAddress(N, AM))
    return false;

  MVT VT = N.getSimpleValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
  return true;
}

/// Returns true if it is able to pattern match an addressing mode.
/// It returns the operands which make up the maximal addressing mode it can
/// match by reference.
///
/// Parent is the parent node of the addr operand that is being matched. It
/// is always a load, store, atomic node, or null. It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes is all the nodes that have an "addr:$ptr"
      // operand but are not MemSDNodes, and thus don't have proper addrspace
      // info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS, 258 -> SS.
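    // (These magic numbers are the x86 pseudo address spaces used for
    // segment-relative pointers, e.g. clang maps
    // __attribute__((address_space(256))) and __seg_gs pointers to
    // GS-relative accesses.)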
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
    if (AddrSpace == 258)
      AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
  }

  if (matchAddress(N, AM))
    return false;

  MVT VT = N.getSimpleValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
  return true;
}

// We can only fold a load if all nodes between it and the root node have a
// single use. If there are additional uses, we could end up duplicating the
// load.
static bool hasSingleUsesFromRoot(SDNode *Root, SDNode *User) {
  while (User != Root) {
    if (!User->hasOneUse())
      return false;
    User = *User->use_begin();
  }

  return true;
}

/// Match a scalar SSE load. In particular, we want to match a load whose top
/// elements are either undef or zeros. The load flavor is derived from the
/// type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternNodeWithChain: this is the matched node that has a chain input
///   and output.
bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root, SDNode *Parent,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (!hasSingleUsesFromRoot(Root, Parent))
    return false;

  // We can allow a full vector load here since narrowing a load is ok.
  if (ISD::isNON_EXTLoad(N.getNode())) {
    PatternNodeWithChain = N;
    if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, Parent, Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // We can also match the special zero extended load opcode.
  if (N.getOpcode() == X86ISD::VZEXT_LOAD) {
    PatternNodeWithChain = N;
    if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, Parent, Root, OptLevel)) {
      auto *MI = cast<MemIntrinsicSDNode>(PatternNodeWithChain);
      return selectAddr(MI, MI->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // Need to make sure that the SCALAR_TO_VECTOR and load are both only used
  // once. Otherwise the load might get duplicated and the chain output of the
  // duplicate load will not be observed by all dependencies.
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR && N.getNode()->hasOneUse()) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
        IsLegalToFold(PatternNodeWithChain, N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                        Segment);
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
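  // For example, the usual lowering of _mm_load_ss produces
  // (X86vzmovl (scalar_to_vector (load ...))), and folding the load here is
  // what lets a movss pattern use the memory operand directly.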
1804 if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() && 1805 // Check to see if the top elements are all zeros (or bitcast of zeros). 1806 N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR && 1807 N.getOperand(0).getNode()->hasOneUse()) { 1808 PatternNodeWithChain = N.getOperand(0).getOperand(0); 1809 if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) && 1810 IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) && 1811 IsLegalToFold(PatternNodeWithChain, N.getNode(), Root, OptLevel)) { 1812 // Okay, this is a zero extending load. Fold it. 1813 LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain); 1814 return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, 1815 Segment); 1816 } 1817 } 1818 1819 return false; 1820 } 1821 1822 1823 bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) { 1824 if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 1825 uint64_t ImmVal = CN->getZExtValue(); 1826 if (!isUInt<32>(ImmVal)) 1827 return false; 1828 1829 Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i64); 1830 return true; 1831 } 1832 1833 // In static codegen with small code model, we can get the address of a label 1834 // into a register with 'movl' 1835 if (N->getOpcode() != X86ISD::Wrapper) 1836 return false; 1837 1838 N = N.getOperand(0); 1839 1840 // At least GNU as does not accept 'movl' for TPOFF relocations. 1841 // FIXME: We could use 'movl' when we know we are targeting MC. 1842 if (N->getOpcode() == ISD::TargetGlobalTLSAddress) 1843 return false; 1844 1845 Imm = N; 1846 if (N->getOpcode() != ISD::TargetGlobalAddress) 1847 return TM.getCodeModel() == CodeModel::Small; 1848 1849 Optional<ConstantRange> CR = 1850 cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange(); 1851 if (!CR) 1852 return TM.getCodeModel() == CodeModel::Small; 1853 1854 return CR->getUnsignedMax().ult(1ull << 32); 1855 } 1856 1857 bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base, 1858 SDValue &Scale, SDValue &Index, 1859 SDValue &Disp, SDValue &Segment) { 1860 // Save the debug loc before calling selectLEAAddr, in case it invalidates N. 1861 SDLoc DL(N); 1862 1863 if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment)) 1864 return false; 1865 1866 RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base); 1867 if (RN && RN->getReg() == 0) 1868 Base = CurDAG->getRegister(0, MVT::i64); 1869 else if (Base.getValueType() == MVT::i32 && !dyn_cast<FrameIndexSDNode>(Base)) { 1870 // Base could already be %rip, particularly in the x32 ABI. 1871 Base = SDValue(CurDAG->getMachineNode( 1872 TargetOpcode::SUBREG_TO_REG, DL, MVT::i64, 1873 CurDAG->getTargetConstant(0, DL, MVT::i64), 1874 Base, 1875 CurDAG->getTargetConstant(X86::sub_32bit, DL, MVT::i32)), 1876 0); 1877 } 1878 1879 RN = dyn_cast<RegisterSDNode>(Index); 1880 if (RN && RN->getReg() == 0) 1881 Index = CurDAG->getRegister(0, MVT::i64); 1882 else { 1883 assert(Index.getValueType() == MVT::i32 && 1884 "Expect to be extending 32-bit registers for use in LEA"); 1885 Index = SDValue(CurDAG->getMachineNode( 1886 TargetOpcode::SUBREG_TO_REG, DL, MVT::i64, 1887 CurDAG->getTargetConstant(0, DL, MVT::i64), 1888 Index, 1889 CurDAG->getTargetConstant(X86::sub_32bit, DL, 1890 MVT::i32)), 1891 0); 1892 } 1893 1894 return true; 1895 } 1896 1897 /// Calls SelectAddr and determines if the maximal addressing 1898 /// mode it matches can be cost effectively emitted as an LEA instruction. 
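/// For example, a match that is just a bare base register is rejected, since
/// a plain mov is cheaper; a frame index, a base plus scaled index, or (on
/// x86-64) a symbolic RIP-relative address scores high enough to be worth an
/// LEA.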
1899 bool X86DAGToDAGISel::selectLEAAddr(SDValue N, 1900 SDValue &Base, SDValue &Scale, 1901 SDValue &Index, SDValue &Disp, 1902 SDValue &Segment) { 1903 X86ISelAddressMode AM; 1904 1905 // Save the DL and VT before calling matchAddress, it can invalidate N. 1906 SDLoc DL(N); 1907 MVT VT = N.getSimpleValueType(); 1908 1909 // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support 1910 // segments. 1911 SDValue Copy = AM.Segment; 1912 SDValue T = CurDAG->getRegister(0, MVT::i32); 1913 AM.Segment = T; 1914 if (matchAddress(N, AM)) 1915 return false; 1916 assert (T == AM.Segment); 1917 AM.Segment = Copy; 1918 1919 unsigned Complexity = 0; 1920 if (AM.BaseType == X86ISelAddressMode::RegBase) 1921 if (AM.Base_Reg.getNode()) 1922 Complexity = 1; 1923 else 1924 AM.Base_Reg = CurDAG->getRegister(0, VT); 1925 else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase) 1926 Complexity = 4; 1927 1928 if (AM.IndexReg.getNode()) 1929 Complexity++; 1930 else 1931 AM.IndexReg = CurDAG->getRegister(0, VT); 1932 1933 // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with 1934 // a simple shift. 1935 if (AM.Scale > 1) 1936 Complexity++; 1937 1938 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA 1939 // to a LEA. This is determined with some experimentation but is by no means 1940 // optimal (especially for code size consideration). LEA is nice because of 1941 // its three-address nature. Tweak the cost function again when we can run 1942 // convertToThreeAddress() at register allocation time. 1943 if (AM.hasSymbolicDisplacement()) { 1944 // For X86-64, always use LEA to materialize RIP-relative addresses. 1945 if (Subtarget->is64Bit()) 1946 Complexity = 4; 1947 else 1948 Complexity += 2; 1949 } 1950 1951 if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode())) 1952 Complexity++; 1953 1954 // If it isn't worth using an LEA, reject it. 1955 if (Complexity <= 2) 1956 return false; 1957 1958 getAddressOperands(AM, DL, Base, Scale, Index, Disp, Segment); 1959 return true; 1960 } 1961 1962 /// This is only run on TargetGlobalTLSAddress nodes. 1963 bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base, 1964 SDValue &Scale, SDValue &Index, 1965 SDValue &Disp, SDValue &Segment) { 1966 assert(N.getOpcode() == ISD::TargetGlobalTLSAddress); 1967 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N); 1968 1969 X86ISelAddressMode AM; 1970 AM.GV = GA->getGlobal(); 1971 AM.Disp += GA->getOffset(); 1972 AM.Base_Reg = CurDAG->getRegister(0, N.getValueType()); 1973 AM.SymbolFlags = GA->getTargetFlags(); 1974 1975 if (N.getValueType() == MVT::i32) { 1976 AM.Scale = 1; 1977 AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32); 1978 } else { 1979 AM.IndexReg = CurDAG->getRegister(0, MVT::i64); 1980 } 1981 1982 getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment); 1983 return true; 1984 } 1985 1986 bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) { 1987 if (auto *CN = dyn_cast<ConstantSDNode>(N)) { 1988 Op = CurDAG->getTargetConstant(CN->getAPIntValue(), SDLoc(CN), 1989 N.getValueType()); 1990 return true; 1991 } 1992 1993 // Keep track of the original value type and whether this value was 1994 // truncated. If we see a truncation from pointer type to VT that truncates 1995 // bits that are known to be zero, we can use a narrow reference. 
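  // In practice the known-zero bits come from globals carrying
  // !absolute_symbol range metadata, which is what getAbsoluteSymbolRange()
  // below inspects.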
1996 EVT VT = N.getValueType(); 1997 bool WasTruncated = false; 1998 if (N.getOpcode() == ISD::TRUNCATE) { 1999 WasTruncated = true; 2000 N = N.getOperand(0); 2001 } 2002 2003 if (N.getOpcode() != X86ISD::Wrapper) 2004 return false; 2005 2006 // We can only use non-GlobalValues as immediates if they were not truncated, 2007 // as we do not have any range information. If we have a GlobalValue and the 2008 // address was not truncated, we can select it as an operand directly. 2009 unsigned Opc = N.getOperand(0)->getOpcode(); 2010 if (Opc != ISD::TargetGlobalAddress || !WasTruncated) { 2011 Op = N.getOperand(0); 2012 // We can only select the operand directly if we didn't have to look past a 2013 // truncate. 2014 return !WasTruncated; 2015 } 2016 2017 // Check that the global's range fits into VT. 2018 auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0)); 2019 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange(); 2020 if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits())) 2021 return false; 2022 2023 // Okay, we can use a narrow reference. 2024 Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT, 2025 GA->getOffset(), GA->getTargetFlags()); 2026 return true; 2027 } 2028 2029 bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N, 2030 SDValue &Base, SDValue &Scale, 2031 SDValue &Index, SDValue &Disp, 2032 SDValue &Segment) { 2033 if (!ISD::isNON_EXTLoad(N.getNode()) || 2034 !IsProfitableToFold(N, P, Root) || 2035 !IsLegalToFold(N, P, Root, OptLevel)) 2036 return false; 2037 2038 return selectAddr(N.getNode(), 2039 N.getOperand(1), Base, Scale, Index, Disp, Segment); 2040 } 2041 2042 bool X86DAGToDAGISel::tryFoldVecLoad(SDNode *Root, SDNode *P, SDValue N, 2043 SDValue &Base, SDValue &Scale, 2044 SDValue &Index, SDValue &Disp, 2045 SDValue &Segment) { 2046 if (!ISD::isNON_EXTLoad(N.getNode()) || 2047 useNonTemporalLoad(cast<LoadSDNode>(N)) || 2048 !IsProfitableToFold(N, P, Root) || 2049 !IsLegalToFold(N, P, Root, OptLevel)) 2050 return false; 2051 2052 return selectAddr(N.getNode(), 2053 N.getOperand(1), Base, Scale, Index, Disp, Segment); 2054 } 2055 2056 /// Return an SDNode that returns the value of the global base register. 2057 /// Output instructions required to initialize the global base register, 2058 /// if necessary. 2059 SDNode *X86DAGToDAGISel::getGlobalBaseReg() { 2060 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF); 2061 auto &DL = MF->getDataLayout(); 2062 return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode(); 2063 } 2064 2065 bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const { 2066 if (N->getOpcode() == ISD::TRUNCATE) 2067 N = N->getOperand(0).getNode(); 2068 if (N->getOpcode() != X86ISD::Wrapper) 2069 return false; 2070 2071 auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0)); 2072 if (!GA) 2073 return false; 2074 2075 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange(); 2076 return CR && CR->getSignedMin().sge(-1ull << Width) && 2077 CR->getSignedMax().slt(1ull << Width); 2078 } 2079 2080 /// Test whether the given X86ISD::CMP node has any uses which require the SF 2081 /// or OF bits to be accurate. 2082 static bool hasNoSignedComparisonUses(SDNode *N) { 2083 // Examine each user of the node. 2084 for (SDNode::use_iterator UI = N->use_begin(), 2085 UE = N->use_end(); UI != UE; ++UI) { 2086 // Only examine CopyToReg uses. 
2087 if (UI->getOpcode() != ISD::CopyToReg) 2088 return false; 2089 // Only examine CopyToReg uses that copy to EFLAGS. 2090 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != 2091 X86::EFLAGS) 2092 return false; 2093 // Examine each user of the CopyToReg use. 2094 for (SDNode::use_iterator FlagUI = UI->use_begin(), 2095 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) { 2096 // Only examine the Flag result. 2097 if (FlagUI.getUse().getResNo() != 1) continue; 2098 // Anything unusual: assume conservatively. 2099 if (!FlagUI->isMachineOpcode()) return false; 2100 // Examine the opcode of the user. 2101 switch (FlagUI->getMachineOpcode()) { 2102 // These comparisons don't treat the most significant bit specially. 2103 case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr: 2104 case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr: 2105 case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm: 2106 case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm: 2107 case X86::JA_1: case X86::JAE_1: case X86::JB_1: case X86::JBE_1: 2108 case X86::JE_1: case X86::JNE_1: case X86::JP_1: case X86::JNP_1: 2109 case X86::CMOVA16rr: case X86::CMOVA16rm: 2110 case X86::CMOVA32rr: case X86::CMOVA32rm: 2111 case X86::CMOVA64rr: case X86::CMOVA64rm: 2112 case X86::CMOVAE16rr: case X86::CMOVAE16rm: 2113 case X86::CMOVAE32rr: case X86::CMOVAE32rm: 2114 case X86::CMOVAE64rr: case X86::CMOVAE64rm: 2115 case X86::CMOVB16rr: case X86::CMOVB16rm: 2116 case X86::CMOVB32rr: case X86::CMOVB32rm: 2117 case X86::CMOVB64rr: case X86::CMOVB64rm: 2118 case X86::CMOVBE16rr: case X86::CMOVBE16rm: 2119 case X86::CMOVBE32rr: case X86::CMOVBE32rm: 2120 case X86::CMOVBE64rr: case X86::CMOVBE64rm: 2121 case X86::CMOVE16rr: case X86::CMOVE16rm: 2122 case X86::CMOVE32rr: case X86::CMOVE32rm: 2123 case X86::CMOVE64rr: case X86::CMOVE64rm: 2124 case X86::CMOVNE16rr: case X86::CMOVNE16rm: 2125 case X86::CMOVNE32rr: case X86::CMOVNE32rm: 2126 case X86::CMOVNE64rr: case X86::CMOVNE64rm: 2127 case X86::CMOVNP16rr: case X86::CMOVNP16rm: 2128 case X86::CMOVNP32rr: case X86::CMOVNP32rm: 2129 case X86::CMOVNP64rr: case X86::CMOVNP64rm: 2130 case X86::CMOVP16rr: case X86::CMOVP16rm: 2131 case X86::CMOVP32rr: case X86::CMOVP32rm: 2132 case X86::CMOVP64rr: case X86::CMOVP64rm: 2133 continue; 2134 // Anything else: assume conservatively. 2135 default: return false; 2136 } 2137 } 2138 } 2139 return true; 2140 } 2141 2142 /// Test whether the given node which sets flags has any uses which require the 2143 /// CF flag to be accurate. 2144 static bool hasNoCarryFlagUses(SDNode *N) { 2145 // Examine each user of the node. 2146 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); UI != UE; 2147 ++UI) { 2148 // Only check things that use the flags. 2149 if (UI.getUse().getResNo() != 1) 2150 continue; 2151 // Only examine CopyToReg uses. 2152 if (UI->getOpcode() != ISD::CopyToReg) 2153 return false; 2154 // Only examine CopyToReg uses that copy to EFLAGS. 2155 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS) 2156 return false; 2157 // Examine each user of the CopyToReg use. 2158 for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end(); 2159 FlagUI != FlagUE; ++FlagUI) { 2160 // Only examine the Flag result. 2161 if (FlagUI.getUse().getResNo() != 1) 2162 continue; 2163 // Anything unusual: assume conservatively. 2164 if (!FlagUI->isMachineOpcode()) 2165 return false; 2166 // Examine the opcode of the user. 
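      // The cases accepted below are exactly the SETcc/Jcc/CMOV forms whose
      // conditions (O/NO/E/NE/S/NS/P/NP/L/GE/LE/G) are computed from OF, ZF,
      // SF and PF alone and never read the carry flag.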
2167 switch (FlagUI->getMachineOpcode()) { 2168 // Comparisons which don't examine the CF flag. 2169 case X86::SETOr: case X86::SETNOr: case X86::SETEr: case X86::SETNEr: 2170 case X86::SETSr: case X86::SETNSr: case X86::SETPr: case X86::SETNPr: 2171 case X86::SETLr: case X86::SETGEr: case X86::SETLEr: case X86::SETGr: 2172 case X86::JO_1: case X86::JNO_1: case X86::JE_1: case X86::JNE_1: 2173 case X86::JS_1: case X86::JNS_1: case X86::JP_1: case X86::JNP_1: 2174 case X86::JL_1: case X86::JGE_1: case X86::JLE_1: case X86::JG_1: 2175 case X86::CMOVO16rr: case X86::CMOVO32rr: case X86::CMOVO64rr: 2176 case X86::CMOVO16rm: case X86::CMOVO32rm: case X86::CMOVO64rm: 2177 case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr: 2178 case X86::CMOVNO16rm: case X86::CMOVNO32rm: case X86::CMOVNO64rm: 2179 case X86::CMOVE16rr: case X86::CMOVE32rr: case X86::CMOVE64rr: 2180 case X86::CMOVE16rm: case X86::CMOVE32rm: case X86::CMOVE64rm: 2181 case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr: 2182 case X86::CMOVNE16rm: case X86::CMOVNE32rm: case X86::CMOVNE64rm: 2183 case X86::CMOVS16rr: case X86::CMOVS32rr: case X86::CMOVS64rr: 2184 case X86::CMOVS16rm: case X86::CMOVS32rm: case X86::CMOVS64rm: 2185 case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr: 2186 case X86::CMOVNS16rm: case X86::CMOVNS32rm: case X86::CMOVNS64rm: 2187 case X86::CMOVP16rr: case X86::CMOVP32rr: case X86::CMOVP64rr: 2188 case X86::CMOVP16rm: case X86::CMOVP32rm: case X86::CMOVP64rm: 2189 case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr: 2190 case X86::CMOVNP16rm: case X86::CMOVNP32rm: case X86::CMOVNP64rm: 2191 case X86::CMOVL16rr: case X86::CMOVL32rr: case X86::CMOVL64rr: 2192 case X86::CMOVL16rm: case X86::CMOVL32rm: case X86::CMOVL64rm: 2193 case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr: 2194 case X86::CMOVGE16rm: case X86::CMOVGE32rm: case X86::CMOVGE64rm: 2195 case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr: 2196 case X86::CMOVLE16rm: case X86::CMOVLE32rm: case X86::CMOVLE64rm: 2197 case X86::CMOVG16rr: case X86::CMOVG32rr: case X86::CMOVG64rr: 2198 case X86::CMOVG16rm: case X86::CMOVG32rm: case X86::CMOVG64rm: 2199 continue; 2200 // Anything else: assume conservatively. 2201 default: 2202 return false; 2203 } 2204 } 2205 } 2206 return true; 2207 } 2208 2209 /// Check whether or not the chain ending in StoreNode is suitable for doing 2210 /// the {load; op; store} to modify transformation. 2211 static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode, 2212 SDValue StoredVal, SelectionDAG *CurDAG, 2213 LoadSDNode *&LoadNode, 2214 SDValue &InputChain) { 2215 // is the stored value result 0 of the load? 2216 if (StoredVal.getResNo() != 0) return false; 2217 2218 // are there other uses of the loaded value than the inc or dec? 2219 if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false; 2220 2221 // is the store non-extending and non-indexed? 2222 if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal()) 2223 return false; 2224 2225 SDValue Load = StoredVal->getOperand(0); 2226 // Is the stored value a non-extending and non-indexed load? 2227 if (!ISD::isNormalLoad(Load.getNode())) return false; 2228 2229 // Return LoadNode by reference. 2230 LoadNode = cast<LoadSDNode>(Load); 2231 2232 // Is store the only read of the loaded value? 2233 if (!Load.hasOneUse()) 2234 return false; 2235 2236 // Is the address of the store the same as the load? 
2237 if (LoadNode->getBasePtr() != StoreNode->getBasePtr() || 2238 LoadNode->getOffset() != StoreNode->getOffset()) 2239 return false; 2240 2241 bool FoundLoad = false; 2242 SmallVector<SDValue, 4> ChainOps; 2243 SmallVector<const SDNode *, 4> LoopWorklist; 2244 SmallPtrSet<const SDNode *, 16> Visited; 2245 const unsigned int Max = 1024; 2246 2247 // Visualization of Load-Op-Store fusion: 2248 // ------------------------- 2249 // Legend: 2250 // *-lines = Chain operand dependencies. 2251 // |-lines = Normal operand dependencies. 2252 // Dependencies flow down and right. n-suffix references multiple nodes. 2253 // 2254 // C Xn C 2255 // * * * 2256 // * * * 2257 // Xn A-LD Yn TF Yn 2258 // * * \ | * | 2259 // * * \ | * | 2260 // * * \ | => A--LD_OP_ST 2261 // * * \| \ 2262 // TF OP \ 2263 // * | \ Zn 2264 // * | \ 2265 // A-ST Zn 2266 // 2267 2268 // This merge induced dependences from: #1: Xn -> LD, OP, Zn 2269 // #2: Yn -> LD 2270 // #3: ST -> Zn 2271 2272 // Ensure the transform is safe by checking for the dual 2273 // dependencies to make sure we do not induce a loop. 2274 2275 // As LD is a predecessor to both OP and ST we can do this by checking: 2276 // a). if LD is a predecessor to a member of Xn or Yn. 2277 // b). if a Zn is a predecessor to ST. 2278 2279 // However, (b) can only occur through being a chain predecessor to 2280 // ST, which is the same as Zn being a member or predecessor of Xn, 2281 // which is a subset of LD being a predecessor of Xn. So it's 2282 // subsumed by check (a). 2283 2284 SDValue Chain = StoreNode->getChain(); 2285 2286 // Gather X elements in ChainOps. 2287 if (Chain == Load.getValue(1)) { 2288 FoundLoad = true; 2289 ChainOps.push_back(Load.getOperand(0)); 2290 } else if (Chain.getOpcode() == ISD::TokenFactor) { 2291 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) { 2292 SDValue Op = Chain.getOperand(i); 2293 if (Op == Load.getValue(1)) { 2294 FoundLoad = true; 2295 // Drop Load, but keep its chain. No cycle check necessary. 2296 ChainOps.push_back(Load.getOperand(0)); 2297 continue; 2298 } 2299 LoopWorklist.push_back(Op.getNode()); 2300 ChainOps.push_back(Op); 2301 } 2302 } 2303 2304 if (!FoundLoad) 2305 return false; 2306 2307 // Worklist is currently Xn. Add Yn to worklist. 2308 for (SDValue Op : StoredVal->ops()) 2309 if (Op.getNode() != LoadNode) 2310 LoopWorklist.push_back(Op.getNode()); 2311 2312 // Check (a) if Load is a predecessor to Xn + Yn 2313 if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max, 2314 true)) 2315 return false; 2316 2317 InputChain = 2318 CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps); 2319 return true; 2320 } 2321 2322 // Change a chain of {load; op; store} of the same value into a simple op 2323 // through memory of that value, if the uses of the modified value and its 2324 // address are suitable. 2325 // 2326 // The tablegen pattern memory operand pattern is currently not able to match 2327 // the case where the EFLAGS on the original operation are used. 2328 // 2329 // To move this to tablegen, we'll need to improve tablegen to allow flags to 2330 // be transferred from a node in the pattern to the result node, probably with 2331 // a new keyword. 
// For example, we have this
//   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
//                  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
//                   (implicit EFLAGS)]>;
// but we may need something like this
//   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
//                  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
//                   (transferrable EFLAGS)]>;
//
// Until then, we manually fold these and instruction select the operation
// here.
bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
  StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
  SDValue StoredVal = StoreNode->getOperand(1);
  unsigned Opc = StoredVal->getOpcode();

  // Before we try to select anything, make sure this is a memory operand size
  // and opcode we can handle. Note that this must match the code below that
  // actually lowers the opcodes.
  EVT MemVT = StoreNode->getMemoryVT();
  if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
      MemVT != MVT::i8)
    return false;
  switch (Opc) {
  default:
    return false;
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::ADD:
  case X86ISD::ADC:
  case X86ISD::SUB:
  case X86ISD::SBB:
  case X86ISD::AND:
  case X86ISD::OR:
  case X86ISD::XOR:
    break;
  }

  LoadSDNode *LoadNode = nullptr;
  SDValue InputChain;
  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadNode,
                                   InputChain))
    return false;

  SDValue Base, Scale, Index, Disp, Segment;
  if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
                  Segment))
    return false;

  auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
                          unsigned Opc8) {
    switch (MemVT.getSimpleVT().SimpleTy) {
    case MVT::i64:
      return Opc64;
    case MVT::i32:
      return Opc32;
    case MVT::i16:
      return Opc16;
    case MVT::i8:
      return Opc8;
    default:
      llvm_unreachable("Invalid size!");
    }
  };

  MachineSDNode *Result;
  switch (Opc) {
  case X86ISD::INC:
  case X86ISD::DEC: {
    unsigned NewOpc =
        Opc == X86ISD::INC
            ?
SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m) 2403 : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m); 2404 const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain}; 2405 Result = 2406 CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other, Ops); 2407 break; 2408 } 2409 case X86ISD::ADD: 2410 case X86ISD::ADC: 2411 case X86ISD::SUB: 2412 case X86ISD::SBB: 2413 case X86ISD::AND: 2414 case X86ISD::OR: 2415 case X86ISD::XOR: { 2416 auto SelectRegOpcode = [SelectOpcode](unsigned Opc) { 2417 switch (Opc) { 2418 case X86ISD::ADD: 2419 return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr, 2420 X86::ADD8mr); 2421 case X86ISD::ADC: 2422 return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr, 2423 X86::ADC8mr); 2424 case X86ISD::SUB: 2425 return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr, 2426 X86::SUB8mr); 2427 case X86ISD::SBB: 2428 return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr, 2429 X86::SBB8mr); 2430 case X86ISD::AND: 2431 return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr, 2432 X86::AND8mr); 2433 case X86ISD::OR: 2434 return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr); 2435 case X86ISD::XOR: 2436 return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr, 2437 X86::XOR8mr); 2438 default: 2439 llvm_unreachable("Invalid opcode!"); 2440 } 2441 }; 2442 auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) { 2443 switch (Opc) { 2444 case X86ISD::ADD: 2445 return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0); 2446 case X86ISD::ADC: 2447 return SelectOpcode(X86::ADC64mi8, X86::ADC32mi8, X86::ADC16mi8, 0); 2448 case X86ISD::SUB: 2449 return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0); 2450 case X86ISD::SBB: 2451 return SelectOpcode(X86::SBB64mi8, X86::SBB32mi8, X86::SBB16mi8, 0); 2452 case X86ISD::AND: 2453 return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0); 2454 case X86ISD::OR: 2455 return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0); 2456 case X86ISD::XOR: 2457 return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0); 2458 default: 2459 llvm_unreachable("Invalid opcode!"); 2460 } 2461 }; 2462 auto SelectImmOpcode = [SelectOpcode](unsigned Opc) { 2463 switch (Opc) { 2464 case X86ISD::ADD: 2465 return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi, 2466 X86::ADD8mi); 2467 case X86ISD::ADC: 2468 return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi, 2469 X86::ADC8mi); 2470 case X86ISD::SUB: 2471 return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi, 2472 X86::SUB8mi); 2473 case X86ISD::SBB: 2474 return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi, 2475 X86::SBB8mi); 2476 case X86ISD::AND: 2477 return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi, 2478 X86::AND8mi); 2479 case X86ISD::OR: 2480 return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi, 2481 X86::OR8mi); 2482 case X86ISD::XOR: 2483 return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi, 2484 X86::XOR8mi); 2485 default: 2486 llvm_unreachable("Invalid opcode!"); 2487 } 2488 }; 2489 2490 unsigned NewOpc = SelectRegOpcode(Opc); 2491 SDValue Operand = StoredVal->getOperand(1); 2492 2493 // See if the operand is a constant that we can fold into an immediate 2494 // operand. 
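    // For example, "addl $128, (%rdi)" requires a 4-byte immediate, while the
    // equivalent "subl $-128, (%rdi)" fits in an imm8, so negating the
    // constant and flipping ADD<->SUB below is a net size win whenever the
    // carry flag is unobserved.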
2495 if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) { 2496 auto OperandV = OperandC->getAPIntValue(); 2497 2498 // Check if we can shrink the operand enough to fit in an immediate (or 2499 // fit into a smaller immediate) by negating it and switching the 2500 // operation. 2501 if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) && 2502 ((MemVT != MVT::i8 && OperandV.getMinSignedBits() > 8 && 2503 (-OperandV).getMinSignedBits() <= 8) || 2504 (MemVT == MVT::i64 && OperandV.getMinSignedBits() > 32 && 2505 (-OperandV).getMinSignedBits() <= 32)) && 2506 hasNoCarryFlagUses(StoredVal.getNode())) { 2507 OperandV = -OperandV; 2508 Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD; 2509 } 2510 2511 // First try to fit this into an Imm8 operand. If it doesn't fit, then try 2512 // the larger immediate operand. 2513 if (MemVT != MVT::i8 && OperandV.getMinSignedBits() <= 8) { 2514 Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT); 2515 NewOpc = SelectImm8Opcode(Opc); 2516 } else if (OperandV.getActiveBits() <= MemVT.getSizeInBits() && 2517 (MemVT != MVT::i64 || OperandV.getMinSignedBits() <= 32)) { 2518 Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT); 2519 NewOpc = SelectImmOpcode(Opc); 2520 } 2521 } 2522 2523 if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) { 2524 SDValue CopyTo = 2525 CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS, 2526 StoredVal.getOperand(2), SDValue()); 2527 2528 const SDValue Ops[] = {Base, Scale, Index, Disp, 2529 Segment, Operand, CopyTo, CopyTo.getValue(1)}; 2530 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other, 2531 Ops); 2532 } else { 2533 const SDValue Ops[] = {Base, Scale, Index, Disp, 2534 Segment, Operand, InputChain}; 2535 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other, 2536 Ops); 2537 } 2538 break; 2539 } 2540 default: 2541 llvm_unreachable("Invalid opcode!"); 2542 } 2543 2544 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2); 2545 MemOp[0] = StoreNode->getMemOperand(); 2546 MemOp[1] = LoadNode->getMemOperand(); 2547 Result->setMemRefs(MemOp, MemOp + 2); 2548 2549 // Update Load Chain uses as well. 2550 ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1)); 2551 ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1)); 2552 ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0)); 2553 CurDAG->RemoveDeadNode(Node); 2554 return true; 2555 } 2556 2557 // See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI. 2558 bool X86DAGToDAGISel::matchBEXTRFromAnd(SDNode *Node) { 2559 MVT NVT = Node->getSimpleValueType(0); 2560 SDLoc dl(Node); 2561 2562 SDValue N0 = Node->getOperand(0); 2563 SDValue N1 = Node->getOperand(1); 2564 2565 if (!Subtarget->hasBMI() && !Subtarget->hasTBM()) 2566 return false; 2567 2568 // Must have a shift right. 2569 if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA) 2570 return false; 2571 2572 // Shift can't have additional users. 2573 if (!N0->hasOneUse()) 2574 return false; 2575 2576 // Only supported for 32 and 64 bits. 2577 if (NVT != MVT::i32 && NVT != MVT::i64) 2578 return false; 2579 2580 // Shift amount and RHS of and must be constant. 2581 ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(N1); 2582 ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 2583 if (!MaskCst || !ShiftCst) 2584 return false; 2585 2586 // And RHS must be a mask. 
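  // That is, of the form (1 << n) - 1. BEXTR's control operand packs the
  // starting bit into bits 7:0 and the extract length into bits 15:8, so
  // e.g. (x >> 4) & 0xFFF becomes a BEXTR with control value 0x0C04.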
  uint64_t Mask = MaskCst->getZExtValue();
  if (!isMask_64(Mask))
    return false;

  uint64_t Shift = ShiftCst->getZExtValue();
  uint64_t MaskSize = countPopulation(Mask);

  // Don't interfere with something that can be handled by extracting AH.
  // TODO: If we are able to fold a load, BEXTR might still be better than AH.
  if (Shift == 8 && MaskSize == 8)
    return false;

  // Make sure we are only using bits that were in the original value, not
  // shifted in.
  if (Shift + MaskSize > NVT.getSizeInBits())
    return false;

  // Create a BEXTR node and run it through selection.
  SDValue C = CurDAG->getConstant(Shift | (MaskSize << 8), dl, NVT);
  SDValue New = CurDAG->getNode(X86ISD::BEXTR, dl, NVT,
                                N0->getOperand(0), C);
  ReplaceNode(Node, New.getNode());
  SelectCode(New.getNode());
  return true;
}

// Emit a PCMPISTR(I/M) instruction.
MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
                                             bool MayFoldLoad, const SDLoc &dl,
                                             MVT VT, SDNode *Node) {
  SDValue N0 = Node->getOperand(0);
  SDValue N1 = Node->getOperand(1);
  SDValue Imm = Node->getOperand(2);
  const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
  Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());

  // If there is a load, it will be behind a bitcast. We don't need to check
  // alignment on this load.
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (MayFoldLoad && N1->getOpcode() == ISD::BITCAST && N1->hasOneUse() &&
      tryFoldVecLoad(Node, N1.getNode(), N1.getOperand(0), Tmp0, Tmp1, Tmp2,
                     Tmp3, Tmp4)) {
    SDValue Load = N1.getOperand(0);
    SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
                      Load.getOperand(0) };
    SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
    MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
    // Update the chain.
    ReplaceUses(Load.getValue(1), SDValue(CNode, 2));
    // Record the mem-refs.
    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
    MemOp[0] = cast<LoadSDNode>(Load)->getMemOperand();
    CNode->setMemRefs(MemOp, MemOp + 1);
    return CNode;
  }

  SDValue Ops[] = { N0, N1, Imm };
  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
  MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
  return CNode;
}

// Emit a PCMPESTR(I/M) instruction. Also return the Glue result in case we
// need to emit a second instruction after this one. This is needed since we
// have two CopyToReg nodes glued before this and we need to continue that
// glue through.
MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
                                             bool MayFoldLoad, const SDLoc &dl,
                                             MVT VT, SDNode *Node,
                                             SDValue &InFlag) {
  SDValue N0 = Node->getOperand(0);
  SDValue N2 = Node->getOperand(2);
  SDValue Imm = Node->getOperand(4);
  const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
  Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());

  // If there is a load, it will be behind a bitcast. We don't need to check
  // alignment on this load.
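  // (The SSE4.2 string instructions are specified to tolerate unaligned
  // memory operands, unlike most 128-bit SSE memory forms.)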
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (MayFoldLoad && N2->getOpcode() == ISD::BITCAST && N2->hasOneUse() &&
      tryFoldVecLoad(Node, N2.getNode(), N2.getOperand(0), Tmp0, Tmp1, Tmp2,
                     Tmp3, Tmp4)) {
    SDValue Load = N2.getOperand(0);
    SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
                      Load.getOperand(0), InFlag };
    SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
    MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
    InFlag = SDValue(CNode, 3);
    // Update the chain.
    ReplaceUses(Load.getValue(1), SDValue(CNode, 2));
    // Record the mem-refs.
    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
    MemOp[0] = cast<LoadSDNode>(Load)->getMemOperand();
    CNode->setMemRefs(MemOp, MemOp + 1);
    return CNode;
  }

  SDValue Ops[] = { N0, N2, Imm, InFlag };
  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue);
  MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
  InFlag = SDValue(CNode, 2);
  return CNode;
}

/// If the high bits of an 'and' operand are known zero, try setting the
/// high bits of an 'and' constant operand to produce a smaller encoding by
/// creating a small, sign-extended negative immediate rather than a large
/// positive one. This reverses a transform in SimplifyDemandedBits that
/// shrinks mask constants by clearing bits. There is also a possibility that
/// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
/// case, just replace the 'and'. Return 'true' if the node is replaced.
bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
  // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
  // have immediate operands.
  MVT VT = And->getSimpleValueType(0);
  if (VT != MVT::i32 && VT != MVT::i64)
    return false;

  auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
  if (!And1C)
    return false;

  // Bail out if the mask constant is already negative. It can't shrink any
  // further. If the upper 32 bits of a 64 bit mask are all zeros, we have
  // special isel patterns to use a 32-bit and instead of a 64-bit and by
  // relying on the implicit zeroing of 32 bit ops. So we should check if the
  // lower 32 bits are negative too.
  APInt MaskVal = And1C->getAPIntValue();
  unsigned MaskLZ = MaskVal.countLeadingZeros();
  if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
    return false;

  // Don't extend into the upper 32 bits of a 64 bit mask.
  if (VT == MVT::i64 && MaskLZ >= 32) {
    MaskLZ -= 32;
    MaskVal = MaskVal.trunc(32);
  }

  SDValue And0 = And->getOperand(0);
  APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ);
  APInt NegMaskVal = MaskVal | HighZeros;

  // If a negative constant would not allow a smaller encoding, there's no need
  // to continue. Only change the constant when we know it's a win.
  unsigned MinWidth = NegMaskVal.getMinSignedBits();
  if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getMinSignedBits() <= 32))
    return false;

  // Extend masks if we truncated above.
  if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) {
    NegMaskVal = NegMaskVal.zext(64);
    HighZeros = HighZeros.zext(64);
  }

  // The variable operand must be all zeros in the top bits to allow using the
  // new, negative constant as the mask.
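  // Worked example: an i64 "and" with 0x7FFFFFFFFFFFFFF0 has no legal
  // immediate encoding at all (it would need a movabsq first), but if bit 63
  // of the variable operand is known zero, the mask can become
  // 0xFFFFFFFFFFFFFFF0, i.e. -16, and "andq $-16, %reg" encodes with an imm8.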
2742 if (!CurDAG->MaskedValueIsZero(And0, HighZeros)) 2743 return false; 2744 2745 // Check if the mask is -1. In that case, this is an unnecessary instruction 2746 // that escaped earlier analysis. 2747 if (NegMaskVal.isAllOnesValue()) { 2748 ReplaceNode(And, And0.getNode()); 2749 return true; 2750 } 2751 2752 // A negative mask allows a smaller encoding. Create a new 'and' node. 2753 SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT); 2754 SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask); 2755 ReplaceNode(And, NewAnd.getNode()); 2756 SelectCode(NewAnd.getNode()); 2757 return true; 2758 } 2759 2760 void X86DAGToDAGISel::Select(SDNode *Node) { 2761 MVT NVT = Node->getSimpleValueType(0); 2762 unsigned Opcode = Node->getOpcode(); 2763 SDLoc dl(Node); 2764 2765 if (Node->isMachineOpcode()) { 2766 LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n'); 2767 Node->setNodeId(-1); 2768 return; // Already selected. 2769 } 2770 2771 switch (Opcode) { 2772 default: break; 2773 case ISD::BRIND: { 2774 if (Subtarget->isTargetNaCl()) 2775 // NaCl has its own pass where jmp %r32 are converted to jmp %r64. We 2776 // leave the instruction alone. 2777 break; 2778 if (Subtarget->isTarget64BitILP32()) { 2779 // Converts a 32-bit register to a 64-bit, zero-extended version of 2780 // it. This is needed because x86-64 can do many things, but jmp %r32 2781 // ain't one of them. 2782 const SDValue &Target = Node->getOperand(1); 2783 assert(Target.getSimpleValueType() == llvm::MVT::i32); 2784 SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, EVT(MVT::i64)); 2785 SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other, 2786 Node->getOperand(0), ZextTarget); 2787 ReplaceNode(Node, Brind.getNode()); 2788 SelectCode(ZextTarget.getNode()); 2789 SelectCode(Brind.getNode()); 2790 return; 2791 } 2792 break; 2793 } 2794 case X86ISD::GlobalBaseReg: 2795 ReplaceNode(Node, getGlobalBaseReg()); 2796 return; 2797 2798 case X86ISD::SELECT: 2799 case X86ISD::SHRUNKBLEND: { 2800 // SHRUNKBLEND selects like a regular VSELECT. Same with X86ISD::SELECT. 2801 SDValue VSelect = CurDAG->getNode( 2802 ISD::VSELECT, SDLoc(Node), Node->getValueType(0), Node->getOperand(0), 2803 Node->getOperand(1), Node->getOperand(2)); 2804 ReplaceNode(Node, VSelect.getNode()); 2805 SelectCode(VSelect.getNode()); 2806 // We already called ReplaceUses. 2807 return; 2808 } 2809 2810 case ISD::AND: 2811 if (matchBEXTRFromAnd(Node)) 2812 return; 2813 if (shrinkAndImmediate(Node)) 2814 return; 2815 2816 LLVM_FALLTHROUGH; 2817 case ISD::OR: 2818 case ISD::XOR: { 2819 2820 // For operations of the form (x << C1) op C2, check if we can use a smaller 2821 // encoding for C2 by transforming it into (x op (C2>>C1)) << C1. 2822 SDValue N0 = Node->getOperand(0); 2823 SDValue N1 = Node->getOperand(1); 2824 2825 if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse()) 2826 break; 2827 2828 // i8 is unshrinkable, i16 should be promoted to i32. 2829 if (NVT != MVT::i32 && NVT != MVT::i64) 2830 break; 2831 2832 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1); 2833 ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 2834 if (!Cst || !ShlCst) 2835 break; 2836 2837 int64_t Val = Cst->getSExtValue(); 2838 uint64_t ShlVal = ShlCst->getZExtValue(); 2839 2840 // Make sure that we don't change the operation by removing bits. 2841 // This only matters for OR and XOR, AND is unaffected. 
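    // For example, ((x << 8) | 0x4000) can be rewritten as ((x | 0x40) << 8):
    // the low 8 bits of 0x4000 are zero, so no set bits are lost, and the OR
    // now uses an imm8 instead of an imm32 encoding.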
2842 uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1; 2843 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0) 2844 break; 2845 2846 unsigned ShlOp, AddOp, Op; 2847 MVT CstVT = NVT; 2848 2849 // Check the minimum bitwidth for the new constant. 2850 // TODO: AND32ri is the same as AND64ri32 with zext imm. 2851 // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr 2852 // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32. 2853 if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal)) 2854 CstVT = MVT::i8; 2855 else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal)) 2856 CstVT = MVT::i32; 2857 2858 // Bail if there is no smaller encoding. 2859 if (NVT == CstVT) 2860 break; 2861 2862 switch (NVT.SimpleTy) { 2863 default: llvm_unreachable("Unsupported VT!"); 2864 case MVT::i32: 2865 assert(CstVT == MVT::i8); 2866 ShlOp = X86::SHL32ri; 2867 AddOp = X86::ADD32rr; 2868 2869 switch (Opcode) { 2870 default: llvm_unreachable("Impossible opcode"); 2871 case ISD::AND: Op = X86::AND32ri8; break; 2872 case ISD::OR: Op = X86::OR32ri8; break; 2873 case ISD::XOR: Op = X86::XOR32ri8; break; 2874 } 2875 break; 2876 case MVT::i64: 2877 assert(CstVT == MVT::i8 || CstVT == MVT::i32); 2878 ShlOp = X86::SHL64ri; 2879 AddOp = X86::ADD64rr; 2880 2881 switch (Opcode) { 2882 default: llvm_unreachable("Impossible opcode"); 2883 case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break; 2884 case ISD::OR: Op = CstVT==MVT::i8? X86::OR64ri8 : X86::OR64ri32; break; 2885 case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break; 2886 } 2887 break; 2888 } 2889 2890 // Emit the smaller op and the shift. 2891 SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, dl, CstVT); 2892 SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst); 2893 if (ShlVal == 1) 2894 CurDAG->SelectNodeTo(Node, AddOp, NVT, SDValue(New, 0), 2895 SDValue(New, 0)); 2896 else 2897 CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0), 2898 getI8Imm(ShlVal, dl)); 2899 return; 2900 } 2901 case X86ISD::UMUL8: 2902 case X86ISD::SMUL8: { 2903 SDValue N0 = Node->getOperand(0); 2904 SDValue N1 = Node->getOperand(1); 2905 2906 unsigned Opc = (Opcode == X86ISD::SMUL8 ? X86::IMUL8r : X86::MUL8r); 2907 2908 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::AL, 2909 N0, SDValue()).getValue(1); 2910 2911 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32); 2912 SDValue Ops[] = {N1, InFlag}; 2913 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops); 2914 2915 ReplaceNode(Node, CNode); 2916 return; 2917 } 2918 2919 case X86ISD::UMUL: { 2920 SDValue N0 = Node->getOperand(0); 2921 SDValue N1 = Node->getOperand(1); 2922 2923 unsigned LoReg, Opc; 2924 switch (NVT.SimpleTy) { 2925 default: llvm_unreachable("Unsupported VT!"); 2926 // MVT::i8 is handled by X86ISD::UMUL8. 
    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);

    ReplaceNode(Node, CNode);
    return;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned Opc, MOpc;
    bool isSigned = Opcode == ISD::SMUL_LOHI;
    bool hasBMI2 = Subtarget->hasBMI2();
    if (!isSigned) {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
                     MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
      case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
                     MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned SrcReg, LoReg, HiReg;
    switch (Opc) {
    default: llvm_unreachable("Unknown MUL opcode!");
    case X86::IMUL32r:
    case X86::MUL32r:
      SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
      break;
    case X86::IMUL64r:
    case X86::MUL64r:
      SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
      break;
    case X86::MULX32rr:
      SrcReg = X86::EDX; LoReg = HiReg = 0;
      break;
    case X86::MULX64rr:
      SrcReg = X86::RDX; LoReg = HiReg = 0;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
                                          N0, SDValue()).getValue(1);
    SDValue ResHi, ResLo;

    if (foldedLoad) {
      SDValue Chain;
      MachineSDNode *CNode = nullptr;
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
        CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
        Chain = SDValue(CNode, 2);
        InFlag = SDValue(CNode, 3);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
        CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        Chain = SDValue(CNode, 0);
        InFlag = SDValue(CNode, 1);
      }

      // Update the chain.
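      // The folded load node is about to go away, so everything that used its
      // chain result must be rewired to the multiply's chain output, and the
      // load's MachineMemOperand is carried over so later passes still see
      // the memory access.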
3019 ReplaceUses(N1.getValue(1), Chain); 3020 // Record the mem-refs 3021 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 3022 MemOp[0] = cast<LoadSDNode>(N1)->getMemOperand(); 3023 CNode->setMemRefs(MemOp, MemOp + 1); 3024 } else { 3025 SDValue Ops[] = { N1, InFlag }; 3026 if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) { 3027 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue); 3028 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops); 3029 ResHi = SDValue(CNode, 0); 3030 ResLo = SDValue(CNode, 1); 3031 InFlag = SDValue(CNode, 2); 3032 } else { 3033 SDVTList VTs = CurDAG->getVTList(MVT::Glue); 3034 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops); 3035 InFlag = SDValue(CNode, 0); 3036 } 3037 } 3038 3039 // Copy the low half of the result, if it is needed. 3040 if (!SDValue(Node, 0).use_empty()) { 3041 if (!ResLo.getNode()) { 3042 assert(LoReg && "Register for low half is not defined!"); 3043 ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT, 3044 InFlag); 3045 InFlag = ResLo.getValue(2); 3046 } 3047 ReplaceUses(SDValue(Node, 0), ResLo); 3048 LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); 3049 dbgs() << '\n'); 3050 } 3051 // Copy the high half of the result, if it is needed. 3052 if (!SDValue(Node, 1).use_empty()) { 3053 if (!ResHi.getNode()) { 3054 assert(HiReg && "Register for high half is not defined!"); 3055 ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT, 3056 InFlag); 3057 InFlag = ResHi.getValue(2); 3058 } 3059 ReplaceUses(SDValue(Node, 1), ResHi); 3060 LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); 3061 dbgs() << '\n'); 3062 } 3063 3064 CurDAG->RemoveDeadNode(Node); 3065 return; 3066 } 3067 3068 case ISD::SDIVREM: 3069 case ISD::UDIVREM: 3070 case X86ISD::SDIVREM8_SEXT_HREG: 3071 case X86ISD::UDIVREM8_ZEXT_HREG: { 3072 SDValue N0 = Node->getOperand(0); 3073 SDValue N1 = Node->getOperand(1); 3074 3075 unsigned Opc, MOpc; 3076 bool isSigned = (Opcode == ISD::SDIVREM || 3077 Opcode == X86ISD::SDIVREM8_SEXT_HREG); 3078 if (!isSigned) { 3079 switch (NVT.SimpleTy) { 3080 default: llvm_unreachable("Unsupported VT!"); 3081 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break; 3082 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break; 3083 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break; 3084 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break; 3085 } 3086 } else { 3087 switch (NVT.SimpleTy) { 3088 default: llvm_unreachable("Unsupported VT!"); 3089 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break; 3090 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break; 3091 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break; 3092 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break; 3093 } 3094 } 3095 3096 unsigned LoReg, HiReg, ClrReg; 3097 unsigned SExtOpcode; 3098 switch (NVT.SimpleTy) { 3099 default: llvm_unreachable("Unsupported VT!"); 3100 case MVT::i8: 3101 LoReg = X86::AL; ClrReg = HiReg = X86::AH; 3102 SExtOpcode = X86::CBW; 3103 break; 3104 case MVT::i16: 3105 LoReg = X86::AX; HiReg = X86::DX; 3106 ClrReg = X86::DX; 3107 SExtOpcode = X86::CWD; 3108 break; 3109 case MVT::i32: 3110 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX; 3111 SExtOpcode = X86::CDQ; 3112 break; 3113 case MVT::i64: 3114 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX; 3115 SExtOpcode = X86::CQO; 3116 break; 3117 } 3118 3119 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 3120 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); 3121 bool signBitIsZero = 

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8: just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0), 0);
        Chain = CurDAG->getEntryNode();
      }
      Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag), 0);
      } else {
        // Zero out the high part, effectively zero extending the input.
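        // MOV32r0 (a pseudo that expands to "xor r32, r32") is the canonical
        // zero idiom. For i16 the value is then narrowed with EXTRACT_SUBREG;
        // for i64 it is widened with SUBREG_TO_REG, which is free because
        // writing a 32-bit register implicitly clears bits 63:32 on x86-64.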
        SDValue ClrNode =
            SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, MVT::i32), 0);
        switch (NVT.SimpleTy) {
        case MVT::i16:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
                          CurDAG->getTargetConstant(X86::sub_16bit, dl,
                                                    MVT::i32)),
                      0);
          break;
        case MVT::i32:
          break;
        case MVT::i64:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                          CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
                          CurDAG->getTargetConstant(X86::sub_32bit, dl,
                                                    MVT::i32)),
                      0);
          break;
        default:
          llvm_unreachable("Unexpected division source");
        }

        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      MachineSDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
      // Record the mem-refs.
      MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
      MemOp[0] = cast<LoadSDNode>(N1)->getMemOperand();
      CNode->setMemRefs(MemOp, MemOp + 1);
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by explicitly copying it to
    // an ABCD_L register.
    //
    // The current assumption of the register allocator is that isel
    // won't generate explicit references to the GR8_ABCD_H registers. If
    // the allocator and/or the backend get enhanced to be more robust in
    // that regard, this can be, and should be, removed.
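    //
    // This only matters in 64-bit mode, where an instruction carrying a REX
    // prefix cannot encode AH/BH/CH/DH at all; the _NOREX extending moves
    // below are constrained to register classes that never require a REX
    // prefix.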
    if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
      SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
      unsigned AHExtOpcode =
        isSigned ? X86::MOVSX32rr8_NOREX : X86::MOVZX32rr8_NOREX;

      SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
                                             MVT::Glue, AHCopy, InFlag);
      SDValue Result(RNode, 0);
      InFlag = SDValue(RNode, 1);

      if (Opcode == X86ISD::UDIVREM8_ZEXT_HREG ||
          Opcode == X86ISD::SDIVREM8_SEXT_HREG) {
        assert(Node->getValueType(1) == MVT::i32 && "Unexpected result type!");
      } else {
        Result =
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
      }
      ReplaceUses(SDValue(Node, 1), Result);
      LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }
    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case X86ISD::CMP: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look past the truncate if CMP is the only use of it.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        hasNoSignedComparisonUses(Node))
      N0 = N0.getOperand(0);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getOpcode() == ISD::AND &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (!C) break;
      uint64_t Mask = C->getZExtValue();

      MVT VT;
      int SubRegOp;
      unsigned Op;

      if (isUInt<8>(Mask) &&
          (!(Mask & 0x80) || hasNoSignedComparisonUses(Node))) {
        // For example, convert "testl %eax, $8" to "testb %al, $8".
        VT = MVT::i8;
        SubRegOp = X86::sub_8bit;
        Op = X86::TEST8ri;
      } else if (OptForMinSize && isUInt<16>(Mask) &&
                 (!(Mask & 0x8000) || hasNoSignedComparisonUses(Node))) {
        // For example, convert "testl %eax, $32776" to "testw %ax, $32776".
        // NOTE: We only want to form TESTW instructions if optimizing for
        // min size. Otherwise we only save one byte and possibly get a length
        // changing prefix penalty in the decoders.
        VT = MVT::i16;
        SubRegOp = X86::sub_16bit;
        Op = X86::TEST16ri;
      } else if (isUInt<32>(Mask) && N0.getValueType() != MVT::i16 &&
                 (!(Mask & 0x80000000) || hasNoSignedComparisonUses(Node))) {
        // For example, convert "testq %rax, $268468232" to
        // "testl %eax, $268468232".
        // NOTE: We only want to run this transform if N0 is 32 or 64 bits.
        // Otherwise, we find ourselves in a position where we have to do
        // promotion. If previous passes did not promote the and, we assume
        // they had a good reason not to and do not promote here.
        VT = MVT::i32;
        SubRegOp = X86::sub_32bit;
        Op = X86::TEST32ri;
      } else {
        // No eligible transformation was found.
        break;
      }

      SDValue Imm = CurDAG->getTargetConstant(Mask, dl, VT);
      SDValue Reg = N0.getOperand(0);

      // Extract the subregister if necessary.
      if (N0.getValueType() != VT)
        Reg = CurDAG->getTargetExtractSubreg(SubRegOp, dl, VT, Reg);

      // Emit a testb, testw, or testl.
      SDNode *NewNode = CurDAG->getMachineNode(Op, dl, MVT::i32, Reg, Imm);
      // Replace CMP with TEST.
      ReplaceNode(Node, NewNode);
      return;
    }
    break;
  }
  case X86ISD::PCMPISTR: {
    if (!Subtarget->hasSSE42())
      break;
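
    // PCMPISTRI and PCMPISTRM are distinct instructions: the I form leaves
    // the index in ECX and the M form leaves the mask in XMM0. If both
    // results are live we must emit both instructions, and folding the same
    // load into each of them would read memory twice.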

    bool NeedIndex = !SDValue(Node, 0).use_empty();
    bool NeedMask = !SDValue(Node, 1).use_empty();
    // We can't fold a load if we are going to make two instructions.
    bool MayFoldLoad = !NeedIndex || !NeedMask;

    MachineSDNode *CNode;
    if (NeedMask) {
      unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrr : X86::PCMPISTRMrr;
      unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrm : X86::PCMPISTRMrm;
      CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node);
      ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
    }
    if (NeedIndex || !NeedMask) {
      unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr : X86::PCMPISTRIrr;
      unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrm : X86::PCMPISTRIrm;
      CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node);
      ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    }

    // Connect the flag usage to the last instruction created.
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
    CurDAG->RemoveDeadNode(Node);
    return;
  }
  case X86ISD::PCMPESTR: {
    if (!Subtarget->hasSSE42())
      break;

    // Copy the two implicit register inputs.
    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EAX,
                                          Node->getOperand(1),
                                          SDValue()).getValue(1);
    InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
                                  Node->getOperand(3), InFlag).getValue(1);

    bool NeedIndex = !SDValue(Node, 0).use_empty();
    bool NeedMask = !SDValue(Node, 1).use_empty();
    // We can't fold a load if we are going to make two instructions.
    bool MayFoldLoad = !NeedIndex || !NeedMask;

    MachineSDNode *CNode;
    if (NeedMask) {
      unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrr : X86::PCMPESTRMrr;
      unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrm : X86::PCMPESTRMrm;
      CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node,
                           InFlag);
      ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
    }
    if (NeedIndex || !NeedMask) {
      unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : X86::PCMPESTRIrr;
      unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrm : X86::PCMPESTRIrm;
      CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node, InFlag);
      ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    }

    // Connect the flag usage to the last instruction created.
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case ISD::STORE:
    if (foldLoadStoreIntoMemOperand(Node))
      return;
    break;
  }

  SelectCode(Node);
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
    // FIXME: It seems strange that 'i' is needed here since it's supposed to
    // be an immediate and not a memory constraint.
    LLVM_FALLTHROUGH;
  case InlineAsm::Constraint_o: // offsetable ??
  case InlineAsm::Constraint_v: // not offsetable ??
  case InlineAsm::Constraint_m: // memory
  case InlineAsm::Constraint_X:
    if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// This pass converts a legalized DAG into a X86-specific DAG,
/// ready for instruction scheduling.
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}