//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to an X86 dag.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
    }
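
    // Illustrative example (not taken from the surrounding code): the memory
    // operand of
    //   movl 8(%ebx,%ecx,4), %eax
    // corresponds to Base_Reg = EBX, IndexReg = ECX, Scale = 4 and Disp = 8,
    // with the symbolic fields (GV, CP, ES, JT, BlockAddr) left unset.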

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode() != 0)
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode() != 0)
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
#endif
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    const X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    virtual void EmitFunctionEntryCode();

    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

    virtual void PreprocessISelDAG();

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }
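
    // Illustrative values for the predicate above: 0xFFFFFFFF80000000ULL
    // passes i64immSExt32, since sign-extending its low 32 bits (0x80000000)
    // reproduces the full 64-bit value, whereas 0x0000000080000000ULL fails,
    // since sign extension would instead yield 0xFFFFFFFF80000000.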
185 #include "X86GenDAGISel.inc" 186 187 private: 188 SDNode *Select(SDNode *N); 189 SDNode *SelectGather(SDNode *N, unsigned Opc); 190 SDNode *SelectAtomic64(SDNode *Node, unsigned Opc); 191 SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT); 192 193 bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM); 194 bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM); 195 bool MatchWrapper(SDValue N, X86ISelAddressMode &AM); 196 bool MatchAddress(SDValue N, X86ISelAddressMode &AM); 197 bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, 198 unsigned Depth); 199 bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM); 200 bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base, 201 SDValue &Scale, SDValue &Index, SDValue &Disp, 202 SDValue &Segment); 203 bool SelectLEAAddr(SDValue N, SDValue &Base, 204 SDValue &Scale, SDValue &Index, SDValue &Disp, 205 SDValue &Segment); 206 bool SelectTLSADDRAddr(SDValue N, SDValue &Base, 207 SDValue &Scale, SDValue &Index, SDValue &Disp, 208 SDValue &Segment); 209 bool SelectScalarSSELoad(SDNode *Root, SDValue N, 210 SDValue &Base, SDValue &Scale, 211 SDValue &Index, SDValue &Disp, 212 SDValue &Segment, 213 SDValue &NodeWithChain); 214 215 bool TryFoldLoad(SDNode *P, SDValue N, 216 SDValue &Base, SDValue &Scale, 217 SDValue &Index, SDValue &Disp, 218 SDValue &Segment); 219 220 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for 221 /// inline asm expressions. 222 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, 223 char ConstraintCode, 224 std::vector<SDValue> &OutOps); 225 226 void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI); 227 228 inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base, 229 SDValue &Scale, SDValue &Index, 230 SDValue &Disp, SDValue &Segment) { 231 Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ? 232 CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) : 233 AM.Base_Reg; 234 Scale = getI8Imm(AM.Scale); 235 Index = AM.IndexReg; 236 // These are 32-bit even in 64-bit mode since RIP relative offset 237 // is 32-bit. 238 if (AM.GV) 239 Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(), 240 MVT::i32, AM.Disp, 241 AM.SymbolFlags); 242 else if (AM.CP) 243 Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, 244 AM.Align, AM.Disp, AM.SymbolFlags); 245 else if (AM.ES) { 246 assert(!AM.Disp && "Non-zero displacement is ignored with ES."); 247 Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags); 248 } else if (AM.JT != -1) { 249 assert(!AM.Disp && "Non-zero displacement is ignored with JT."); 250 Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags); 251 } else if (AM.BlockAddr) 252 Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp, 253 AM.SymbolFlags); 254 else 255 Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32); 256 257 if (AM.Segment.getNode()) 258 Segment = AM.Segment; 259 else 260 Segment = CurDAG->getRegister(0, MVT::i32); 261 } 262 263 /// getI8Imm - Return a target constant with the specified value, of type 264 /// i8. 265 inline SDValue getI8Imm(unsigned Imm) { 266 return CurDAG->getTargetConstant(Imm, MVT::i8); 267 } 268 269 /// getI32Imm - Return a target constant with the specified value, of type 270 /// i32. 271 inline SDValue getI32Imm(unsigned Imm) { 272 return CurDAG->getTargetConstant(Imm, MVT::i32); 273 } 274 275 /// getGlobalBaseReg - Return an SDNode that returns the value of 276 /// the global base register. 

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() const {
      return getTargetMachine().getInstrInfo();
    }
  };
}


bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      //   movl 4(%esp), %eax
      //   addl $4, %eax
      // vs.
      //   movl $4, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      //   movl %gs:0, %eax
      //   leal i@NTPOFF(%eax), %eax
      // instead of
      //   movl $i@NTPOFF, %eax
      //   addl %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}

/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// the load's chain operand and move the load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  unsigned NumOps = Call.getNode()->getNumOperands();
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = NumOps; i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], NumOps);
}

/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain; this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}
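
// The payoff of the machinery above, sketched on made-up code: once the
// callee load has been moved next to the call, isel can fold it and emit a
// single memory-operand call such as
//   callq *(%rax)
// instead of materializing the target first:
//   movq (%rax), %rcx
//   callq *%rcx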

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        (N->getOpcode() == X86ISD::CALL ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          // Only do this if the load can be folded into TC_RETURN.
          (Subtarget->is64Bit() ||
           getTargetMachine().getRelocationModel() != Reloc::PIC_)))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to be store
    // and load to the stack. This is a gross hack. We would like to simply
    // mark these as being illegal, but when we do that, legalize produces
    // these when it expands calls, then expands these in the same legalize
    // pass. We would like dag combine to be able to hack on these between
    // the call expansion and the node legalization. As such this pass
    // basically does "really late" legalization of these inline with the
    // X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}


/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}
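
// A concrete illustration of the 64-bit check above (made-up values): with
// AM.Disp = 0x7FFFFFF0, folding Offset = 0x20 would give Val = 0x80000010,
// which no longer fits a signed 32-bit displacement field, so the fold is
// rejected (returns true) and AM is left unchanged.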

bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
        Subtarget->isTargetLinux())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference. If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses. We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.BlockAddr = BA->getBlockAddress();
      AM.SymbolFlags = BA->getTargetFlags();
      if (FoldOffsetIntoAddress(BA->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else
      llvm_unreachable("Unhandled symbol reference node.");

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -mcmodel=small mode. In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      AM.BlockAddr = BA->getBlockAddress();
      AM.Disp += BA->getOffset();
      AM.SymbolFlags = BA->getTargetFlags();
    } else
      llvm_unreachable("Unhandled symbol reference node.");
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}

// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}
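
// A worked example for the transform below (illustrative values): for
//   (x >> 6) & 0x3fc
// ShiftAmt = 6 gives ScaleLog = 2, and the mask is exactly 0xff << 2, so the
// expression becomes ((x >> 8) & 0xff) << 2. The (x >> 8) & 0xff piece can be
// selected as an h-register extract (e.g. movzbl %ah, %ecx when x lives in
// %eax), and the << 2 is absorbed into the addressing mode as Scale = 4.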

// Transform "(X >> (8-C1)) & C2" to "(X >> 8) & 0xff" if safe. This
// allows us to convert the shift and and into an h-register extract and
// a scaled index. Returns false if the simplification is performed.
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, Eight);
  InsertDAGNode(DAG, N, Srl);
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, And);
  InsertDAGNode(DAG, N, ShlCount);
  InsertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}

// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // the isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, NewAnd);
  InsertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}
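
// A small example of the swap performed above (illustrative values):
//   (x << 2) & 0x3f0
// becomes (x & 0xfc) << 2, after which the outer shift can be folded into
// the addressing mode as Scale = 4 while the AND remains on the index value.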

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %ecx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = CountLeadingZeros_64(Mask);
  unsigned MaskTZ = CountTrailingZeros_64(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that mask is a continuous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits =
      X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
                                               MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  EVT VT = N.getValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X);
    InsertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  DebugLoc DL = N.getDebugLoc();
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewSRLAmt);
  InsertDAGNode(DAG, N, NewSRL);
  InsertDAGNode(DAG, N, NewSHLAmt);
  InsertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address (leaving the
    // index field unused), use -B as the index. This is a win if A has
    // multiple parts that can be folded into the address. Also, this saves
    // a mov if the base register has other uses, since it avoids a
    // two-address sub instruction; however, it costs an additional mov if
    // the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    InsertDAGNode(*CurDAG, N, Zero);
    InsertDAGNode(*CurDAG, N, Neg);
    return false;
  }

  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      AM.Scale = 1;
      return false;
    }
    N = Handle.getValue();
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;
    uint64_t Mask = N.getConstantOperandVal(1);

    // Try to fold the mask and shift into an extract and scale.
    if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
      return false;
    break;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}
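
// An end-to-end illustration with made-up values: for an address computed as
//   (add (add y, (shl x, 2)), 40)
// MatchAddress produces Base_Reg = y, IndexReg = x, Scale = 4 and Disp = 40,
// i.e. the single memory operand 40(%y,%x,4), folding all of the pointer
// arithmetic into one instruction.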

/// SelectAddr - returns true if it is able to pattern-match an addressing
/// mode. It returns the operands which make up the maximal addressing mode it
/// can match by reference.
///
/// Parent is the parent node of the addr operand that is being matched. It
/// is always a load, store, atomic node, or null. It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes are all the nodes that have an "addr:$ptr" operand
      // that are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  EVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}
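
// For example (illustrative): a dag such as
//   (v4f32 (scalar_to_vector (loadf32 addr)))
// matched by SelectScalarSSELoad lets the load fold straight into an SSE
// operation, so isel can emit
//   addss (%rax), %xmm0
// rather than a separate scalar load followed by a register-register addss.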

/// SelectLEAAddr - it calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost-effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  EVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or
  // with a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP-relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}


bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}
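
// For context on the function below (sketch; the details live in
// X86InstrInfo::getGlobalBaseReg): on 32-bit PIC targets the global base
// register is typically set up once per function with a call/pop pair along
// the lines of
//   calll .L0$pb
// .L0$pb:
//   popl %ebx
// after which globals are addressed relative to %ebx through the GOT.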

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}

SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);

  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain };
  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
                                           MVT::i32, MVT::i32, MVT::Other, Ops,
                                           array_lengthof(Ops));
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}

/// Atomic opcode table
///
enum AtomicOpc {
  ADD,
  SUB,
  INC,
  DEC,
  OR,
  AND,
  XOR,
  AtomicOpcEnd
};

enum AtomicSz {
  ConstantI8,
  I8,
  SextConstantI16,
  ConstantI16,
  I16,
  SextConstantI32,
  ConstantI32,
  I32,
  SextConstantI64,
  ConstantI64,
  I64,
  AtomicSzEnd
};

static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
  {
    X86::LOCK_ADD8mi,
    X86::LOCK_ADD8mr,
    X86::LOCK_ADD16mi8,
    X86::LOCK_ADD16mi,
    X86::LOCK_ADD16mr,
    X86::LOCK_ADD32mi8,
    X86::LOCK_ADD32mi,
    X86::LOCK_ADD32mr,
    X86::LOCK_ADD64mi8,
    X86::LOCK_ADD64mi32,
    X86::LOCK_ADD64mr,
  },
  {
    X86::LOCK_SUB8mi,
    X86::LOCK_SUB8mr,
    X86::LOCK_SUB16mi8,
    X86::LOCK_SUB16mi,
    X86::LOCK_SUB16mr,
    X86::LOCK_SUB32mi8,
    X86::LOCK_SUB32mi,
    X86::LOCK_SUB32mr,
    X86::LOCK_SUB64mi8,
    X86::LOCK_SUB64mi32,
    X86::LOCK_SUB64mr,
  },
  {
    0,
    X86::LOCK_INC8m,
    0,
    0,
    X86::LOCK_INC16m,
    0,
    0,
    X86::LOCK_INC32m,
    0,
    0,
    X86::LOCK_INC64m,
  },
  {
    0,
    X86::LOCK_DEC8m,
    0,
    0,
    X86::LOCK_DEC16m,
    0,
    0,
    X86::LOCK_DEC32m,
    0,
    0,
    X86::LOCK_DEC64m,
  },
  {
    X86::LOCK_OR8mi,
    X86::LOCK_OR8mr,
    X86::LOCK_OR16mi8,
    X86::LOCK_OR16mi,
    X86::LOCK_OR16mr,
    X86::LOCK_OR32mi8,
    X86::LOCK_OR32mi,
    X86::LOCK_OR32mr,
    X86::LOCK_OR64mi8,
    X86::LOCK_OR64mi32,
    X86::LOCK_OR64mr,
  },
  {
    X86::LOCK_AND8mi,
    X86::LOCK_AND8mr,
    X86::LOCK_AND16mi8,
    X86::LOCK_AND16mi,
    X86::LOCK_AND16mr,
    X86::LOCK_AND32mi8,
    X86::LOCK_AND32mi,
    X86::LOCK_AND32mr,
    X86::LOCK_AND64mi8,
    X86::LOCK_AND64mi32,
    X86::LOCK_AND64mr,
  },
  {
    X86::LOCK_XOR8mi,
    X86::LOCK_XOR8mr,
    X86::LOCK_XOR16mi8,
    X86::LOCK_XOR16mi,
    X86::LOCK_XOR16mr,
    X86::LOCK_XOR32mi8,
    X86::LOCK_XOR32mi,
    X86::LOCK_XOR32mr,
    X86::LOCK_XOR64mi8,
    X86::LOCK_XOR64mi32,
    X86::LOCK_XOR64mr,
  }
};
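
// The table is indexed as AtomicOpcTbl[op][operand-kind]; for example,
// AtomicOpcTbl[AND][SextConstantI32] is X86::LOCK_AND32mi8, a 32-bit locked
// AND whose immediate fits in a sign-extended 8-bit field. Zero entries mark
// combinations that do not exist, such as INC with an immediate operand.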
1636 // + empty, the operand is no longer needed once the new op is selected.
1637 // + non-empty, the operand to use, otherwise.
1638 static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
1639 DebugLoc dl,
1640 enum AtomicOpc &Op, EVT NVT,
1641 SDValue Val) {
1642 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
1643 int64_t CNVal = CN->getSExtValue();
1644 // Quit if the constant does not fit in a 32-bit immediate.
1645 if ((int32_t)CNVal != CNVal)
1646 return Val;
1647 // For atomic-load-add, we can apply some optimizations.
1648 if (Op == ADD) {
1649 // Translate to INC/DEC if ADD by 1 or -1.
1650 if ((CNVal == 1) || (CNVal == -1)) {
1651 Op = (CNVal == 1) ? INC : DEC;
1652 // No more constant operand after being translated into INC/DEC.
1653 return SDValue();
1654 }
1655 // Translate to SUB if ADD by a negative value.
1656 if (CNVal < 0) {
1657 Op = SUB;
1658 CNVal = -CNVal;
1659 }
1660 }
1661 return CurDAG->getTargetConstant(CNVal, NVT);
1662 }
1663
1664 // If the value operand has a single use, try to optimize it.
1665 if (Op == ADD && Val.hasOneUse()) {
1666 // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x).
1667 if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) {
1668 Op = SUB;
1669 return Val.getOperand(1);
1670 }
1671 // A special case for i16: it is usually promoted to i32, so the negated
1672 // value may arrive through a truncate. We translate
1673 // (atomic-load-add (truncate (sub 0 x))) into (lock-sub (EXTRACT_SUBREG x)).
1674 if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 &&
1675 Val.getOperand(0).getOpcode() == ISD::SUB &&
1676 X86::isZeroNode(Val.getOperand(0).getOperand(0))) {
1677 Op = SUB;
1678 Val = Val.getOperand(0);
1679 return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT,
1680 Val.getOperand(1));
1681 }
1682 }
1683
1684 return Val;
1685 }
1686
1687 SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
1688 if (Node->hasAnyUseOfValue(0))
1689 return 0;
1690
1691 DebugLoc dl = Node->getDebugLoc();
1692
1693 // Optimize common patterns for __sync_or_and_fetch and similar arith
1694 // operations where the result is not used. This allows us to use the "lock"
1695 // version of the arithmetic instruction.
1696 SDValue Chain = Node->getOperand(0);
1697 SDValue Ptr = Node->getOperand(1);
1698 SDValue Val = Node->getOperand(2);
1699 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
1700 if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
1701 return 0;
1702
1703 // Select which index into the table to use.
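// Illustrative walk-through (no new logic, just an example of the lookup):
// for a 32-bit atomic-load-add of the constant 1 whose result is unused,
// getAtomicLoadArithTargetConstant above rewrites the operation to INC and
// drops the operand, so the lookup below becomes
//   AtomicOpcTbl[INC][I32] == X86::LOCK_INC32m.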
1704 enum AtomicOpc Op; 1705 switch (Node->getOpcode()) { 1706 default: 1707 return 0; 1708 case ISD::ATOMIC_LOAD_OR: 1709 Op = OR; 1710 break; 1711 case ISD::ATOMIC_LOAD_AND: 1712 Op = AND; 1713 break; 1714 case ISD::ATOMIC_LOAD_XOR: 1715 Op = XOR; 1716 break; 1717 case ISD::ATOMIC_LOAD_ADD: 1718 Op = ADD; 1719 break; 1720 } 1721 1722 Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val); 1723 bool isUnOp = !Val.getNode(); 1724 bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant); 1725 1726 unsigned Opc = 0; 1727 switch (NVT.getSimpleVT().SimpleTy) { 1728 default: return 0; 1729 case MVT::i8: 1730 if (isCN) 1731 Opc = AtomicOpcTbl[Op][ConstantI8]; 1732 else 1733 Opc = AtomicOpcTbl[Op][I8]; 1734 break; 1735 case MVT::i16: 1736 if (isCN) { 1737 if (immSext8(Val.getNode())) 1738 Opc = AtomicOpcTbl[Op][SextConstantI16]; 1739 else 1740 Opc = AtomicOpcTbl[Op][ConstantI16]; 1741 } else 1742 Opc = AtomicOpcTbl[Op][I16]; 1743 break; 1744 case MVT::i32: 1745 if (isCN) { 1746 if (immSext8(Val.getNode())) 1747 Opc = AtomicOpcTbl[Op][SextConstantI32]; 1748 else 1749 Opc = AtomicOpcTbl[Op][ConstantI32]; 1750 } else 1751 Opc = AtomicOpcTbl[Op][I32]; 1752 break; 1753 case MVT::i64: 1754 Opc = AtomicOpcTbl[Op][I64]; 1755 if (isCN) { 1756 if (immSext8(Val.getNode())) 1757 Opc = AtomicOpcTbl[Op][SextConstantI64]; 1758 else if (i64immSExt32(Val.getNode())) 1759 Opc = AtomicOpcTbl[Op][ConstantI64]; 1760 } 1761 break; 1762 } 1763 1764 assert(Opc != 0 && "Invalid arith lock transform!"); 1765 1766 SDValue Ret; 1767 SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, 1768 dl, NVT), 0); 1769 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); 1770 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand(); 1771 if (isUnOp) { 1772 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain }; 1773 Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 1774 array_lengthof(Ops)), 0); 1775 } else { 1776 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain }; 1777 Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 1778 array_lengthof(Ops)), 0); 1779 } 1780 cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1); 1781 SDValue RetVals[] = { Undef, Ret }; 1782 return CurDAG->getMergeValues(RetVals, 2, dl).getNode(); 1783 } 1784 1785 /// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has 1786 /// any uses which require the SF or OF bits to be accurate. 1787 static bool HasNoSignedComparisonUses(SDNode *N) { 1788 // Examine each user of the node. 1789 for (SDNode::use_iterator UI = N->use_begin(), 1790 UE = N->use_end(); UI != UE; ++UI) { 1791 // Only examine CopyToReg uses. 1792 if (UI->getOpcode() != ISD::CopyToReg) 1793 return false; 1794 // Only examine CopyToReg uses that copy to EFLAGS. 1795 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != 1796 X86::EFLAGS) 1797 return false; 1798 // Examine each user of the CopyToReg use. 1799 for (SDNode::use_iterator FlagUI = UI->use_begin(), 1800 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) { 1801 // Only examine the Flag result. 1802 if (FlagUI.getUse().getResNo() != 1) continue; 1803 // Anything unusual: assume conservatively. 1804 if (!FlagUI->isMachineOpcode()) return false; 1805 // Examine the opcode of the user. 1806 switch (FlagUI->getMachineOpcode()) { 1807 // These comparisons don't treat the most significant bit specially. 
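// (They read only CF, ZF, or PF. A signed condition such as SETG, JL_4, or
// a CMOVL variant would read SF and OF and therefore must hit the
// conservative default below.)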
1808 case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
1809 case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
1810 case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
1811 case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
1812 case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
1813 case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
1814 case X86::CMOVA16rr: case X86::CMOVA16rm:
1815 case X86::CMOVA32rr: case X86::CMOVA32rm:
1816 case X86::CMOVA64rr: case X86::CMOVA64rm:
1817 case X86::CMOVAE16rr: case X86::CMOVAE16rm:
1818 case X86::CMOVAE32rr: case X86::CMOVAE32rm:
1819 case X86::CMOVAE64rr: case X86::CMOVAE64rm:
1820 case X86::CMOVB16rr: case X86::CMOVB16rm:
1821 case X86::CMOVB32rr: case X86::CMOVB32rm:
1822 case X86::CMOVB64rr: case X86::CMOVB64rm:
1823 case X86::CMOVBE16rr: case X86::CMOVBE16rm:
1824 case X86::CMOVBE32rr: case X86::CMOVBE32rm:
1825 case X86::CMOVBE64rr: case X86::CMOVBE64rm:
1826 case X86::CMOVE16rr: case X86::CMOVE16rm:
1827 case X86::CMOVE32rr: case X86::CMOVE32rm:
1828 case X86::CMOVE64rr: case X86::CMOVE64rm:
1829 case X86::CMOVNE16rr: case X86::CMOVNE16rm:
1830 case X86::CMOVNE32rr: case X86::CMOVNE32rm:
1831 case X86::CMOVNE64rr: case X86::CMOVNE64rm:
1832 case X86::CMOVNP16rr: case X86::CMOVNP16rm:
1833 case X86::CMOVNP32rr: case X86::CMOVNP32rm:
1834 case X86::CMOVNP64rr: case X86::CMOVNP64rm:
1835 case X86::CMOVP16rr: case X86::CMOVP16rm:
1836 case X86::CMOVP32rr: case X86::CMOVP32rm:
1837 case X86::CMOVP64rr: case X86::CMOVP64rm:
1838 continue;
1839 // Anything else: assume conservatively.
1840 default: return false;
1841 }
1842 }
1843 }
1844 return true;
1845 }
1846
1847 /// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
1848 /// is suitable for the {load; increment or decrement; store} read-modify-write
1849 /// transformation.
1850 static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
1851 SDValue StoredVal, SelectionDAG *CurDAG,
1852 LoadSDNode* &LoadNode, SDValue &InputChain) {
1853
1854 // Is the stored value the result of a DEC or INC?
1855 if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;
1856
1857 // Is the stored value result 0 of the INC/DEC?
1858 if (StoredVal.getResNo() != 0) return false;
1859
1860 // Is the store the only use of the INC/DEC result?
1861 if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
1862
1863 // Is the store non-extending, non-indexed, and not marked non-temporal?
1864 if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
1865 return false;
1866
1867 SDValue Load = StoredVal->getOperand(0);
1868 // Is the stored value a non-extending and non-indexed load?
1869 if (!ISD::isNormalLoad(Load.getNode())) return false;
1870
1871 // Return LoadNode by reference.
1872 LoadNode = cast<LoadSDNode>(Load);
1873 // Is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
1874 EVT LdVT = LoadNode->getMemoryVT();
1875 if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
1876 LdVT != MVT::i8)
1877 return false;
1878
1879 // Is the INC/DEC the only user of the loaded value?
1880 if (!Load.hasOneUse())
1881 return false;
1882
1883 // Is the address of the store the same as that of the load?
1884 if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
1885 LoadNode->getOffset() != StoreNode->getOffset())
1886 return false;
1887
1888 // Check if the chain is produced by the load or is a TokenFactor with
1889 // the load output chain as an operand.
Return InputChain by reference.
1890 SDValue Chain = StoreNode->getChain();
1891
1892 bool ChainCheck = false;
1893 if (Chain == Load.getValue(1)) {
1894 ChainCheck = true;
1895 InputChain = LoadNode->getChain();
1896 } else if (Chain.getOpcode() == ISD::TokenFactor) {
1897 SmallVector<SDValue, 4> ChainOps;
1898 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
1899 SDValue Op = Chain.getOperand(i);
1900 if (Op == Load.getValue(1)) {
1901 ChainCheck = true;
1902 continue;
1903 }
1904
1905 // Make sure using Op as part of the chain would not cause a cycle here.
1906 // In theory, we could check whether the chain node is a predecessor of
1907 // the load. But that can be very expensive. Instead visit the uses and
1908 // make sure they all have smaller node ids than the load.
1909 int LoadId = LoadNode->getNodeId();
1910 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
1911 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
1912 if (UI.getUse().getResNo() != 0)
1913 continue;
1914 if (UI->getNodeId() > LoadId)
1915 return false;
1916 }
1917
1918 ChainOps.push_back(Op);
1919 }
1920
1921 if (ChainCheck)
1922 // Make a new TokenFactor with all the other input chains except
1923 // for the load.
1924 InputChain = CurDAG->getNode(ISD::TokenFactor, Chain.getDebugLoc(),
1925 MVT::Other, &ChainOps[0], ChainOps.size());
1926 }
1927 if (!ChainCheck)
1928 return false;
1929
1930 return true;
1931 }
1932
1933 /// getFusedLdStOpcode - Get the appropriate X86 opcode for an in-memory
1934 /// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
1935 static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
1936 if (Opc == X86ISD::DEC) {
1937 if (LdVT == MVT::i64) return X86::DEC64m;
1938 if (LdVT == MVT::i32) return X86::DEC32m;
1939 if (LdVT == MVT::i16) return X86::DEC16m;
1940 if (LdVT == MVT::i8) return X86::DEC8m;
1941 } else {
1942 assert(Opc == X86ISD::INC && "unrecognized opcode");
1943 if (LdVT == MVT::i64) return X86::INC64m;
1944 if (LdVT == MVT::i32) return X86::INC32m;
1945 if (LdVT == MVT::i16) return X86::INC16m;
1946 if (LdVT == MVT::i8) return X86::INC8m;
1947 }
1948 llvm_unreachable("unrecognized size for LdVT");
1949 }
1950
1951 /// SelectGather - Customized ISel for GATHER operations.
1952 ///
1953 SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
1954 // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
1955 SDValue Chain = Node->getOperand(0);
1956 SDValue VSrc = Node->getOperand(2);
1957 SDValue Base = Node->getOperand(3);
1958 SDValue VIdx = Node->getOperand(4);
1959 SDValue VMask = Node->getOperand(5);
1960 ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
1961 if (!Scale)
1962 return 0;
1963
1964 SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
1965 MVT::Other);
1966
1967 // Memory Operands: Base, Scale, Index, Disp, Segment
1968 SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32);
1969 SDValue Segment = CurDAG->getRegister(0, MVT::i32);
1970 const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx,
1971 Disp, Segment, VMask, Chain};
1972 SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
1973 VTs, Ops, array_lengthof(Ops));
1974 // Node has 2 outputs: VDst and MVT::Other.
1975 // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
1976 // We replace VDst of Node with VDst of ResNode, and Other of Node with Other
1977 // of ResNode.
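// A sketch of the mapping (VMask_wb, the written-back mask, is intentionally
// left unused):
//   Node:    result 0 = VDst, result 1 = Other
//   ResNode: result 0 = VDst, result 1 = VMask_wb, result 2 = Other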
1978 ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0)); 1979 ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2)); 1980 return ResNode; 1981 } 1982 1983 SDNode *X86DAGToDAGISel::Select(SDNode *Node) { 1984 EVT NVT = Node->getValueType(0); 1985 unsigned Opc, MOpc; 1986 unsigned Opcode = Node->getOpcode(); 1987 DebugLoc dl = Node->getDebugLoc(); 1988 1989 DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n'); 1990 1991 if (Node->isMachineOpcode()) { 1992 DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n'); 1993 return NULL; // Already selected. 1994 } 1995 1996 switch (Opcode) { 1997 default: break; 1998 case ISD::INTRINSIC_W_CHAIN: { 1999 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue(); 2000 switch (IntNo) { 2001 default: break; 2002 case Intrinsic::x86_avx2_gather_d_pd: 2003 case Intrinsic::x86_avx2_gather_d_pd_256: 2004 case Intrinsic::x86_avx2_gather_q_pd: 2005 case Intrinsic::x86_avx2_gather_q_pd_256: 2006 case Intrinsic::x86_avx2_gather_d_ps: 2007 case Intrinsic::x86_avx2_gather_d_ps_256: 2008 case Intrinsic::x86_avx2_gather_q_ps: 2009 case Intrinsic::x86_avx2_gather_q_ps_256: 2010 case Intrinsic::x86_avx2_gather_d_q: 2011 case Intrinsic::x86_avx2_gather_d_q_256: 2012 case Intrinsic::x86_avx2_gather_q_q: 2013 case Intrinsic::x86_avx2_gather_q_q_256: 2014 case Intrinsic::x86_avx2_gather_d_d: 2015 case Intrinsic::x86_avx2_gather_d_d_256: 2016 case Intrinsic::x86_avx2_gather_q_d: 2017 case Intrinsic::x86_avx2_gather_q_d_256: { 2018 unsigned Opc; 2019 switch (IntNo) { 2020 default: llvm_unreachable("Impossible intrinsic"); 2021 case Intrinsic::x86_avx2_gather_d_pd: Opc = X86::VGATHERDPDrm; break; 2022 case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break; 2023 case Intrinsic::x86_avx2_gather_q_pd: Opc = X86::VGATHERQPDrm; break; 2024 case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break; 2025 case Intrinsic::x86_avx2_gather_d_ps: Opc = X86::VGATHERDPSrm; break; 2026 case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break; 2027 case Intrinsic::x86_avx2_gather_q_ps: Opc = X86::VGATHERQPSrm; break; 2028 case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break; 2029 case Intrinsic::x86_avx2_gather_d_q: Opc = X86::VPGATHERDQrm; break; 2030 case Intrinsic::x86_avx2_gather_d_q_256: Opc = X86::VPGATHERDQYrm; break; 2031 case Intrinsic::x86_avx2_gather_q_q: Opc = X86::VPGATHERQQrm; break; 2032 case Intrinsic::x86_avx2_gather_q_q_256: Opc = X86::VPGATHERQQYrm; break; 2033 case Intrinsic::x86_avx2_gather_d_d: Opc = X86::VPGATHERDDrm; break; 2034 case Intrinsic::x86_avx2_gather_d_d_256: Opc = X86::VPGATHERDDYrm; break; 2035 case Intrinsic::x86_avx2_gather_q_d: Opc = X86::VPGATHERQDrm; break; 2036 case Intrinsic::x86_avx2_gather_q_d_256: Opc = X86::VPGATHERQDYrm; break; 2037 } 2038 SDNode *RetVal = SelectGather(Node, Opc); 2039 if (RetVal) 2040 // We already called ReplaceUses inside SelectGather. 
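// Returning NULL (rather than a replacement node) tells the common
// selection code that this node has already been handled, so it must not
// try to replace its uses again.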
2041 return NULL;
2042 break;
2043 }
2044 }
2045 break;
2046 }
2047 case X86ISD::GlobalBaseReg:
2048 return getGlobalBaseReg();
2049
2050
2051 case X86ISD::ATOMOR64_DAG:
2052 case X86ISD::ATOMXOR64_DAG:
2053 case X86ISD::ATOMADD64_DAG:
2054 case X86ISD::ATOMSUB64_DAG:
2055 case X86ISD::ATOMNAND64_DAG:
2056 case X86ISD::ATOMAND64_DAG:
2057 case X86ISD::ATOMMAX64_DAG:
2058 case X86ISD::ATOMMIN64_DAG:
2059 case X86ISD::ATOMUMAX64_DAG:
2060 case X86ISD::ATOMUMIN64_DAG:
2061 case X86ISD::ATOMSWAP64_DAG: {
2062 unsigned Opc;
2063 switch (Opcode) {
2064 default: llvm_unreachable("Impossible opcode");
2065 case X86ISD::ATOMOR64_DAG: Opc = X86::ATOMOR6432; break;
2066 case X86ISD::ATOMXOR64_DAG: Opc = X86::ATOMXOR6432; break;
2067 case X86ISD::ATOMADD64_DAG: Opc = X86::ATOMADD6432; break;
2068 case X86ISD::ATOMSUB64_DAG: Opc = X86::ATOMSUB6432; break;
2069 case X86ISD::ATOMNAND64_DAG: Opc = X86::ATOMNAND6432; break;
2070 case X86ISD::ATOMAND64_DAG: Opc = X86::ATOMAND6432; break;
2071 case X86ISD::ATOMMAX64_DAG: Opc = X86::ATOMMAX6432; break;
2072 case X86ISD::ATOMMIN64_DAG: Opc = X86::ATOMMIN6432; break;
2073 case X86ISD::ATOMUMAX64_DAG: Opc = X86::ATOMUMAX6432; break;
2074 case X86ISD::ATOMUMIN64_DAG: Opc = X86::ATOMUMIN6432; break;
2075 case X86ISD::ATOMSWAP64_DAG: Opc = X86::ATOMSWAP6432; break;
2076 }
2077 SDNode *RetVal = SelectAtomic64(Node, Opc);
2078 if (RetVal)
2079 return RetVal;
2080 break;
2081 }
2082
2083 case ISD::ATOMIC_LOAD_XOR:
2084 case ISD::ATOMIC_LOAD_AND:
2085 case ISD::ATOMIC_LOAD_OR:
2086 case ISD::ATOMIC_LOAD_ADD: {
2087 SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
2088 if (RetVal)
2089 return RetVal;
2090 break;
2091 }
2092 case ISD::AND:
2093 case ISD::OR:
2094 case ISD::XOR: {
2095 // For operations of the form (x << C1) op C2, check if we can use a smaller
2096 // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
2097 SDValue N0 = Node->getOperand(0);
2098 SDValue N1 = Node->getOperand(1);
2099
2100 if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
2101 break;
2102
2103 // i8 is unshrinkable; i16 should be promoted to i32.
2104 if (NVT != MVT::i32 && NVT != MVT::i64)
2105 break;
2106
2107 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
2108 ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
2109 if (!Cst || !ShlCst)
2110 break;
2111
2112 int64_t Val = Cst->getSExtValue();
2113 uint64_t ShlVal = ShlCst->getZExtValue();
2114
2115 // Make sure that we don't change the operation by removing bits.
2116 // This only matters for OR and XOR; AND is unaffected.
2117 uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
2118 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
2119 break;
2120
2121 unsigned ShlOp, Op;
2122 EVT CstVT = NVT;
2123
2124 // Check the minimum bitwidth for the new constant.
2125 // TODO: AND32ri is the same as AND64ri32 with zext imm.
2126 // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
2127 // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
2128 if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
2129 CstVT = MVT::i8;
2130 else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
2131 CstVT = MVT::i32;
2132
2133 // Bail if there is no smaller encoding.
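// (Conversely, a worked example where the transform does fire: for i32
// (x << 8) | 0x7f00, Val == 0x7f00 does not fit in an i8 but
// Val >> 8 == 0x7f does, and no set bits are discarded since
// 0x7f00 & 0xff == 0; we then emit (x | 0x7f) << 8, where the OR can use
// the one-byte OR32ri8 immediate encoding instead of a four-byte one.)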
2134 if (NVT == CstVT) 2135 break; 2136 2137 switch (NVT.getSimpleVT().SimpleTy) { 2138 default: llvm_unreachable("Unsupported VT!"); 2139 case MVT::i32: 2140 assert(CstVT == MVT::i8); 2141 ShlOp = X86::SHL32ri; 2142 2143 switch (Opcode) { 2144 default: llvm_unreachable("Impossible opcode"); 2145 case ISD::AND: Op = X86::AND32ri8; break; 2146 case ISD::OR: Op = X86::OR32ri8; break; 2147 case ISD::XOR: Op = X86::XOR32ri8; break; 2148 } 2149 break; 2150 case MVT::i64: 2151 assert(CstVT == MVT::i8 || CstVT == MVT::i32); 2152 ShlOp = X86::SHL64ri; 2153 2154 switch (Opcode) { 2155 default: llvm_unreachable("Impossible opcode"); 2156 case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break; 2157 case ISD::OR: Op = CstVT==MVT::i8? X86::OR64ri8 : X86::OR64ri32; break; 2158 case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break; 2159 } 2160 break; 2161 } 2162 2163 // Emit the smaller op and the shift. 2164 SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT); 2165 SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst); 2166 return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0), 2167 getI8Imm(ShlVal)); 2168 } 2169 case X86ISD::UMUL: { 2170 SDValue N0 = Node->getOperand(0); 2171 SDValue N1 = Node->getOperand(1); 2172 2173 unsigned LoReg; 2174 switch (NVT.getSimpleVT().SimpleTy) { 2175 default: llvm_unreachable("Unsupported VT!"); 2176 case MVT::i8: LoReg = X86::AL; Opc = X86::MUL8r; break; 2177 case MVT::i16: LoReg = X86::AX; Opc = X86::MUL16r; break; 2178 case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break; 2179 case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break; 2180 } 2181 2182 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg, 2183 N0, SDValue()).getValue(1); 2184 2185 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32); 2186 SDValue Ops[] = {N1, InFlag}; 2187 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2); 2188 2189 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0)); 2190 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1)); 2191 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2)); 2192 return NULL; 2193 } 2194 2195 case ISD::SMUL_LOHI: 2196 case ISD::UMUL_LOHI: { 2197 SDValue N0 = Node->getOperand(0); 2198 SDValue N1 = Node->getOperand(1); 2199 2200 bool isSigned = Opcode == ISD::SMUL_LOHI; 2201 bool hasBMI2 = Subtarget->hasBMI2(); 2202 if (!isSigned) { 2203 switch (NVT.getSimpleVT().SimpleTy) { 2204 default: llvm_unreachable("Unsupported VT!"); 2205 case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break; 2206 case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break; 2207 case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r; 2208 MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break; 2209 case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r; 2210 MOpc = hasBMI2 ? 
X86::MULX64rm : X86::MUL64m; break;
2211 }
2212 } else {
2213 switch (NVT.getSimpleVT().SimpleTy) {
2214 default: llvm_unreachable("Unsupported VT!");
2215 case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
2216 case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
2217 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
2218 case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
2219 }
2220 }
2221
2222 unsigned SrcReg, LoReg, HiReg;
2223 switch (Opc) {
2224 default: llvm_unreachable("Unknown MUL opcode!");
2225 case X86::IMUL8r:
2226 case X86::MUL8r:
2227 SrcReg = LoReg = X86::AL; HiReg = X86::AH;
2228 break;
2229 case X86::IMUL16r:
2230 case X86::MUL16r:
2231 SrcReg = LoReg = X86::AX; HiReg = X86::DX;
2232 break;
2233 case X86::IMUL32r:
2234 case X86::MUL32r:
2235 SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
2236 break;
2237 case X86::IMUL64r:
2238 case X86::MUL64r:
2239 SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
2240 break;
2241 case X86::MULX32rr:
2242 SrcReg = X86::EDX; LoReg = HiReg = 0;
2243 break;
2244 case X86::MULX64rr:
2245 SrcReg = X86::RDX; LoReg = HiReg = 0;
2246 break;
2247 }
2248
2249 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2250 bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2251 // Multiply is commutative.
2252 if (!foldedLoad) {
2253 foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2254 if (foldedLoad)
2255 std::swap(N0, N1);
2256 }
2257
2258 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
2259 N0, SDValue()).getValue(1);
2260 SDValue ResHi, ResLo;
2261
2262 if (foldedLoad) {
2263 SDValue Chain;
2264 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
2265 InFlag };
2266 if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
2267 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
2268 SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops,
2269 array_lengthof(Ops));
2270 ResHi = SDValue(CNode, 0);
2271 ResLo = SDValue(CNode, 1);
2272 Chain = SDValue(CNode, 2);
2273 InFlag = SDValue(CNode, 3);
2274 } else {
2275 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
2276 SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops,
2277 array_lengthof(Ops));
2278 Chain = SDValue(CNode, 0);
2279 InFlag = SDValue(CNode, 1);
2280 }
2281
2282 // Update the chain.
2283 ReplaceUses(N1.getValue(1), Chain);
2284 } else {
2285 SDValue Ops[] = { N1, InFlag };
2286 if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
2287 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
2288 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops,
2289 array_lengthof(Ops));
2290 ResHi = SDValue(CNode, 0);
2291 ResLo = SDValue(CNode, 1);
2292 InFlag = SDValue(CNode, 2);
2293 } else {
2294 SDVTList VTs = CurDAG->getVTList(MVT::Glue);
2295 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops,
2296 array_lengthof(Ops));
2297 InFlag = SDValue(CNode, 0);
2298 }
2299 }
2300
2301 // Prevent use of AH in a REX instruction by referencing AX instead.
2302 if (HiReg == X86::AH && Subtarget->is64Bit() &&
2303 !SDValue(Node, 1).use_empty()) {
2304 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2305 X86::AX, MVT::i16, InFlag);
2306 InFlag = Result.getValue(2);
2307 // Get the low part if needed. Don't use getCopyFromReg for aliasing
2308 // registers.
2309 if (!SDValue(Node, 0).use_empty())
2310 ReplaceUses(SDValue(Node, 0),
2311 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2312
2313 // Shift AX down 8 bits.
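// (With a REX prefix the encodings that would name AH/BH/CH/DH mean
// SPL/BPL/SIL/DIL instead, so AH is simply unavailable to REX instructions;
// reading AX and shifting is the standard workaround.)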
2314 Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16, 2315 Result, 2316 CurDAG->getTargetConstant(8, MVT::i8)), 0); 2317 // Then truncate it down to i8. 2318 ReplaceUses(SDValue(Node, 1), 2319 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result)); 2320 } 2321 // Copy the low half of the result, if it is needed. 2322 if (!SDValue(Node, 0).use_empty()) { 2323 if (ResLo.getNode() == 0) { 2324 assert(LoReg && "Register for low half is not defined!"); 2325 ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT, 2326 InFlag); 2327 InFlag = ResLo.getValue(2); 2328 } 2329 ReplaceUses(SDValue(Node, 0), ResLo); 2330 DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n'); 2331 } 2332 // Copy the high half of the result, if it is needed. 2333 if (!SDValue(Node, 1).use_empty()) { 2334 if (ResHi.getNode() == 0) { 2335 assert(HiReg && "Register for high half is not defined!"); 2336 ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT, 2337 InFlag); 2338 InFlag = ResHi.getValue(2); 2339 } 2340 ReplaceUses(SDValue(Node, 1), ResHi); 2341 DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n'); 2342 } 2343 2344 return NULL; 2345 } 2346 2347 case ISD::SDIVREM: 2348 case ISD::UDIVREM: { 2349 SDValue N0 = Node->getOperand(0); 2350 SDValue N1 = Node->getOperand(1); 2351 2352 bool isSigned = Opcode == ISD::SDIVREM; 2353 if (!isSigned) { 2354 switch (NVT.getSimpleVT().SimpleTy) { 2355 default: llvm_unreachable("Unsupported VT!"); 2356 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break; 2357 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break; 2358 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break; 2359 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break; 2360 } 2361 } else { 2362 switch (NVT.getSimpleVT().SimpleTy) { 2363 default: llvm_unreachable("Unsupported VT!"); 2364 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break; 2365 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break; 2366 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break; 2367 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break; 2368 } 2369 } 2370 2371 unsigned LoReg, HiReg, ClrReg; 2372 unsigned ClrOpcode, SExtOpcode; 2373 switch (NVT.getSimpleVT().SimpleTy) { 2374 default: llvm_unreachable("Unsupported VT!"); 2375 case MVT::i8: 2376 LoReg = X86::AL; ClrReg = HiReg = X86::AH; 2377 ClrOpcode = 0; 2378 SExtOpcode = X86::CBW; 2379 break; 2380 case MVT::i16: 2381 LoReg = X86::AX; HiReg = X86::DX; 2382 ClrOpcode = X86::MOV16r0; ClrReg = X86::DX; 2383 SExtOpcode = X86::CWD; 2384 break; 2385 case MVT::i32: 2386 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX; 2387 ClrOpcode = X86::MOV32r0; 2388 SExtOpcode = X86::CDQ; 2389 break; 2390 case MVT::i64: 2391 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX; 2392 ClrOpcode = X86::MOV64r0; 2393 SExtOpcode = X86::CQO; 2394 break; 2395 } 2396 2397 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; 2398 bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4); 2399 bool signBitIsZero = CurDAG->SignBitIsZero(N0); 2400 2401 SDValue InFlag; 2402 if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) { 2403 // Special case for div8, just use a move with zero extension to AX to 2404 // clear the upper 8 bits (AH). 
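// An illustrative equivalent in assembly (hypothetical registers):
//   movzbl %cl, %eax   # zero-extend the dividend; AH becomes 0
//   divb   %bl         # AL = quotient, AH = remainder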
2405 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain; 2406 if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) { 2407 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) }; 2408 Move = 2409 SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32, 2410 MVT::Other, Ops, 2411 array_lengthof(Ops)), 0); 2412 Chain = Move.getValue(1); 2413 ReplaceUses(N0.getValue(1), Chain); 2414 } else { 2415 Move = 2416 SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0); 2417 Chain = CurDAG->getEntryNode(); 2418 } 2419 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue()); 2420 InFlag = Chain.getValue(1); 2421 } else { 2422 InFlag = 2423 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, 2424 LoReg, N0, SDValue()).getValue(1); 2425 if (isSigned && !signBitIsZero) { 2426 // Sign extend the low part into the high part. 2427 InFlag = 2428 SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0); 2429 } else { 2430 // Zero out the high part, effectively zero extending the input. 2431 SDValue ClrNode = 2432 SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0); 2433 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg, 2434 ClrNode, InFlag).getValue(1); 2435 } 2436 } 2437 2438 if (foldedLoad) { 2439 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0), 2440 InFlag }; 2441 SDNode *CNode = 2442 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops, 2443 array_lengthof(Ops)); 2444 InFlag = SDValue(CNode, 1); 2445 // Update the chain. 2446 ReplaceUses(N1.getValue(1), SDValue(CNode, 0)); 2447 } else { 2448 InFlag = 2449 SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0); 2450 } 2451 2452 // Prevent use of AH in a REX instruction by referencing AX instead. 2453 // Shift it down 8 bits. 2454 if (HiReg == X86::AH && Subtarget->is64Bit() && 2455 !SDValue(Node, 1).use_empty()) { 2456 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2457 X86::AX, MVT::i16, InFlag); 2458 InFlag = Result.getValue(2); 2459 2460 // If we also need AL (the quotient), get it by extracting a subreg from 2461 // Result. The fast register allocator does not like multiple CopyFromReg 2462 // nodes using aliasing registers. 2463 if (!SDValue(Node, 0).use_empty()) 2464 ReplaceUses(SDValue(Node, 0), 2465 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result)); 2466 2467 // Shift AX right by 8 bits instead of using AH. 2468 Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16, 2469 Result, 2470 CurDAG->getTargetConstant(8, MVT::i8)), 2471 0); 2472 ReplaceUses(SDValue(Node, 1), 2473 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result)); 2474 } 2475 // Copy the division (low) result, if it is needed. 2476 if (!SDValue(Node, 0).use_empty()) { 2477 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2478 LoReg, NVT, InFlag); 2479 InFlag = Result.getValue(2); 2480 ReplaceUses(SDValue(Node, 0), Result); 2481 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n'); 2482 } 2483 // Copy the remainder (high) result, if it is needed. 2484 if (!SDValue(Node, 1).use_empty()) { 2485 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, 2486 HiReg, NVT, InFlag); 2487 InFlag = Result.getValue(2); 2488 ReplaceUses(SDValue(Node, 1), Result); 2489 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n'); 2490 } 2491 return NULL; 2492 } 2493 2494 case X86ISD::CMP: 2495 case X86ISD::SUB: { 2496 // Sometimes a SUB is used to perform comparison. 
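// A SUB whose arithmetic result is unused sets EFLAGS exactly as the
// corresponding CMP would, so the TEST narrowing below applies to both.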
2497 if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
2498 // This node is not a CMP.
2499 break;
2500 SDValue N0 = Node->getOperand(0);
2501 SDValue N1 = Node->getOperand(1);
2502
2503 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
2504 // use a smaller encoding.
2505 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
2506 HasNoSignedComparisonUses(Node))
2507 // Look past the truncate if CMP is the only use of it.
2508 N0 = N0.getOperand(0);
2509 if ((N0.getNode()->getOpcode() == ISD::AND ||
2510 (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
2511 N0.getNode()->hasOneUse() &&
2512 N0.getValueType() != MVT::i8 &&
2513 X86::isZeroNode(N1)) {
2514 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
2515 if (!C) break;
2516
2517 // For example, convert "testl %eax, $8" to "testb %al, $8".
2518 if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
2519 (!(C->getZExtValue() & 0x80) ||
2520 HasNoSignedComparisonUses(Node))) {
2521 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
2522 SDValue Reg = N0.getNode()->getOperand(0);
2523
2524 // On x86-32, only the ABCD registers have 8-bit subregisters.
2525 if (!Subtarget->is64Bit()) {
2526 const TargetRegisterClass *TRC;
2527 switch (N0.getValueType().getSimpleVT().SimpleTy) {
2528 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
2529 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
2530 default: llvm_unreachable("Unsupported TEST operand type!");
2531 }
2532 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
2533 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
2534 Reg.getValueType(), Reg, RC), 0);
2535 }
2536
2537 // Extract the l-register.
2538 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
2539 MVT::i8, Reg);
2540
2541 // Emit a testb.
2542 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
2543 Subreg, Imm);
2544 // Replace SUB|CMP with TEST; since SUB has two outputs while TEST has
2545 // only one, do not call ReplaceAllUsesWith.
2546 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2547 SDValue(NewNode, 0));
2548 return NULL;
2549 }
2550
2551 // For example, convert "testl %eax, $2048" to "testb %ah, $8".
2552 if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
2553 (!(C->getZExtValue() & 0x8000) ||
2554 HasNoSignedComparisonUses(Node))) {
2555 // Shift the immediate right by 8 bits.
2556 SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
2557 MVT::i8);
2558 SDValue Reg = N0.getNode()->getOperand(0);
2559
2560 // Put the value in an ABCD register.
2561 const TargetRegisterClass *TRC;
2562 switch (N0.getValueType().getSimpleVT().SimpleTy) {
2563 case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
2564 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
2565 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
2566 default: llvm_unreachable("Unsupported TEST operand type!");
2567 }
2568 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
2569 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
2570 Reg.getValueType(), Reg, RC), 0);
2571
2572 // Extract the h-register.
2573 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
2574 MVT::i8, Reg);
2575
2576 // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
2577 // target GR8_NOREX registers, so make sure the register class is
2578 // forced.
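// (GR8_NOREX holds only AL/CL/DL/BL and the h-registers AH/CH/DH/BH;
// h-registers cannot be encoded in an instruction carrying a REX prefix,
// hence the dedicated TEST8ri_NOREX opcode here.)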
2579 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
2580 MVT::i32, Subreg, ShiftedImm);
2581 // Replace SUB|CMP with TEST; since SUB has two outputs while TEST has
2582 // only one, do not call ReplaceAllUsesWith.
2583 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2584 SDValue(NewNode, 0));
2585 return NULL;
2586 }
2587
2588 // For example, convert "testl %eax, $32776" to "testw %ax, $32776".
2589 if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
2590 N0.getValueType() != MVT::i16 &&
2591 (!(C->getZExtValue() & 0x8000) ||
2592 HasNoSignedComparisonUses(Node))) {
2593 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
2594 SDValue Reg = N0.getNode()->getOperand(0);
2595
2596 // Extract the 16-bit subregister.
2597 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
2598 MVT::i16, Reg);
2599
2600 // Emit a testw.
2601 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
2602 Subreg, Imm);
2603 // Replace SUB|CMP with TEST; since SUB has two outputs while TEST has
2604 // only one, do not call ReplaceAllUsesWith.
2605 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2606 SDValue(NewNode, 0));
2607 return NULL;
2608 }
2609
2610 // For example, convert "testq %rax, $268468232" to "testl %eax, $268468232".
2611 if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
2612 N0.getValueType() == MVT::i64 &&
2613 (!(C->getZExtValue() & 0x80000000) ||
2614 HasNoSignedComparisonUses(Node))) {
2615 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
2616 SDValue Reg = N0.getNode()->getOperand(0);
2617
2618 // Extract the 32-bit subregister.
2619 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
2620 MVT::i32, Reg);
2621
2622 // Emit a testl.
2623 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
2624 Subreg, Imm);
2625 // Replace SUB|CMP with TEST; since SUB has two outputs while TEST has
2626 // only one, do not call ReplaceAllUsesWith.
2627 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2628 SDValue(NewNode, 0));
2629 return NULL;
2630 }
2631 }
2632 break;
2633 }
2634 case ISD::STORE: {
2635 // Change a chain of {load; increment or decrement; store} of the same value
2636 // into a simple increment or decrement through memory of that value, if the
2637 // uses of the modified value and its address are suitable.
2638 // The DEC64m tablegen pattern is currently not able to match the case where
2639 // the EFLAGS on the original DEC are used. (This also applies to
2640 // {INC,DEC}X{64,32,16,8}.)
2641 // We'll need to improve tablegen to allow flags to be transferred from a
2642 // node in the pattern to the result node,
probably with a new keyword.
2643 // For example, we currently have this:
2644 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2645 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2646 // (implicit EFLAGS)]>;
2647 // but may need something like this:
2648 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2649 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2650 // (transferable EFLAGS)]>;
2651
2652 StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
2653 SDValue StoredVal = StoreNode->getOperand(1);
2654 unsigned Opc = StoredVal->getOpcode();
2655
2656 LoadSDNode *LoadNode = 0;
2657 SDValue InputChain;
2658 if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
2659 LoadNode, InputChain))
2660 break;
2661
2662 SDValue Base, Scale, Index, Disp, Segment;
2663 if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
2664 Base, Scale, Index, Disp, Segment))
2665 break;
2666
2667 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
2668 MemOp[0] = StoreNode->getMemOperand();
2669 MemOp[1] = LoadNode->getMemOperand();
2670 const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
2671 EVT LdVT = LoadNode->getMemoryVT();
2672 unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
2673 MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
2674 Node->getDebugLoc(),
2675 MVT::i32, MVT::Other, Ops,
2676 array_lengthof(Ops));
2677 Result->setMemRefs(MemOp, MemOp + 2);
2678
2679 ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
2680 ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
2681
2682 return Result;
2683 }
2684 }
2685
2686 SDNode *ResNode = SelectCode(Node);
2687
2688 DEBUG(dbgs() << "=> ";
2689 if (ResNode == NULL || ResNode == Node)
2690 Node->dump(CurDAG);
2691 else
2692 ResNode->dump(CurDAG);
2693 dbgs() << '\n');
2694
2695 return ResNode;
2696 }
2697
2698 bool X86DAGToDAGISel::
2699 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
2700 std::vector<SDValue> &OutOps) {
2701 SDValue Op0, Op1, Op2, Op3, Op4;
2702 switch (ConstraintCode) {
2703 case 'o': // offsetable ??
2704 case 'v': // not offsetable ??
2705 default: return true;
2706 case 'm': // memory
2707 if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
2708 return true;
2709 break;
2710 }
2711
2712 OutOps.push_back(Op0);
2713 OutOps.push_back(Op1);
2714 OutOps.push_back(Op2);
2715 OutOps.push_back(Op3);
2716 OutOps.push_back(Op4);
2717 return false;
2718 }
2719
2720 /// createX86ISelDag - This pass converts a legalized DAG into an
2721 /// X86-specific DAG, ready for instruction scheduling.
2722 ///
2723 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
2724 CodeGenOpt::Level OptLevel) {
2725 return new X86DAGToDAGISel(TM, OptLevel);
2726 }
2727
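// Typical use (a sketch; names as in the LLVM 3.x tree): X86PassConfig::
// addInstSelector() in X86TargetMachine.cpp registers this pass with
//   addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));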