//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
//                           ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}
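
// For example, after type legalization a <8 x i8> build_vector may carry i32
// operands; EltSize is still 8, so an operand constant of 0x000000FF counts
// as all-ones for the element (8 trailing ones), while 0x000000FE does not.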

/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-0
  // elements.
  SDValue Zero = N->getOperand(i);
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
    if (!CN->isNullValue())
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
    if (!CFPN->getValueAPF().isPosZero())
      return false;
  } else
    return false;

  // Okay, we have at least one 0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != Zero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}

/// isScalarToVector - Return true if the specified node is a
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
    if (N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;

  return true;
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
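
// Worked example of the condition-code bit encoding used above, where the low
// bits are E (1), G (2), L (4) and U (8), and the integer codes also set the
// ordering-insensitive N bit (16):
//   getSetCCSwappedOperands(SETUGT): U|G (10) becomes U|L (12), i.e. SETULT.
//   getSetCCInverse(SETULT, true):   12 ^ 7 == 11, i.e. SETUGE.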

/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if it is an unsigned comparison. Return zero
/// if the operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE:  return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE:  return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the N bit.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
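
// Worked examples for the two combining routines above:
//   getSetCCOrOperation(SETUGT, SETULT, true) combines to SETUNE (U|L|G),
//   which is then canonicalized to the legal integer code SETNE.
//   getSetCCAndOperation(SETUGT, SETNE, true) intersects to SETOGT (G),
//   which the switch above canonicalizes back to SETUGT.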

//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDValue *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDUse *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID,
                          unsigned short OpC, SDVTList VTList,
                          const SDValue *OpList, unsigned N) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList, N);
}

/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant:
    ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue());
    break;
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}
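
// For example, a volatile sign-extending pre-incremented load would encode as
//   ISD::SEXTLOAD | (ISD::PRE_INC << 2) | (1 << 5)
// so loads and stores that differ in any of these properties never collide in
// the CSE map.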

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;   // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, 0);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted.  (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
  ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
  for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
    DbgVals[i]->setIsInvalidated();
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != 0;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = 0;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}
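
// Example of the recursive merging mentioned above: if (add x, y) is mutated
// in place into (add x, z) while an identical (add x, z) already exists, all
// users of the mutated node are redirected to the existing node; a redirected
// user may in turn become structurally identical to another node, and so on.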

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}


/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           const SDValue *Ops,unsigned NumOps,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

#ifndef NDEBUG
/// VerifyNodeCommon - Sanity check the given node.  Aborts if it is invalid.
static void VerifyNodeCommon(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
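
// For instance, VerifyNodeCommon accepts (i64 (build_pair i32, i32)), since
// the result width (64) is exactly twice the operand width (32), but rejects
// a BUILD_VECTOR whose operand count differs from the result's element count.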

/// VerifySDNode - Sanity check the given SDNode.  Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  // The SDNode allocators cannot be used to allocate nodes with fields that are
  // not present in an SDNode!
  assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
  assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
  assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
  assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
  assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
  assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
  assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
  assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
  assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
  assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
  assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
  assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
  assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
  assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
  assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
  assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
  assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
  assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
  assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");

  VerifyNodeCommon(N);
}

/// VerifyMachineNode - Sanity check the given MachineNode.  Aborts if it is
/// invalid.
static void VerifyMachineNode(SDNode *N) {
  // The MachineNode allocators cannot be used to allocate nodes with fields
  // that are not present in a MachineNode!
  // Currently there are no such nodes.

  VerifyNodeCommon(N);
}
#endif // NDEBUG

/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TM.getTargetLowering()->getDataLayout()->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
  : TM(tm), TSI(*tm.getSelectionDAGInfo()), TTI(0), OptLevel(OL),
    EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
    Root(getEntryNode()), UpdateListeners(0) {
  AllNodes.push_back(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf, const TargetTransformInfo *tti) {
  MF = &mf;
  TTI = tti;
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(0));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(0));

  EntryNode.UseList = 0;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, SDLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}
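
// For example, getZeroExtendInReg(Op: i32, DL, MVT::i8) produces
// (and Op, 0xFF): masking with the low bits of the narrower type is exactly
// a zero-extension from i8 performed inside the wider register.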

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(SDLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
         (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT) {
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  const TargetLowering *TLI = TM.getTargetLowering();

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM.  In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(Elt);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val, TM.getTargetLowering()->getPointerTy(), isTarget);
}
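
// Note that vector types are splatted: getConstant(1, MVT::v4i32) builds
// (build_vector 1, 1, 1, 1), where the scalar ConstantSDNode is CSE'd once
// and reused for every element.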

SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
  // we don't have issues with SNANs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(&V);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME SDLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, SDLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT==MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
           EltVT==MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, SDLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TM.getTargetLowering()->getPointerTy().getSizeInBits();
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee for determining thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
  }

  unsigned Opc;
  if (GVar && GVar->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL.getIROrder(),
                                                      DL.getDebugLoc(), GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(FI);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
      TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}


SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment =
      TM.getTargetLowering()->getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), 0, 0);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MBB);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (CondCodeNodes[Cond] == 0) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    AllNodes.push_back(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and indices that point
// at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}
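
// For example, with NElts == 4 the mask <0,5,1,6> selecting from (N1,N2)
// becomes <4,1,5,2> selecting from (N2,N1): sources in N2 (index >= NElts)
// drop by NElts, and non-negative sources in N1 rise by NElts.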

SDValue SelectionDAG::getVectorShuffle(EVT VT, SDLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(N1.getValueType() == N2.getValueType() && "Invalid VECTOR_SHUFFLE");
  assert(VT.isVector() && N1.getValueType().isVector() &&
         "Vector Shuffle VTs must be vectors");
  assert(VT.getVectorElementType() == N1.getValueType().getVectorElementType()
         && "Vector Shuffle VTs must have same element type");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef.  Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // Canonicalize all indices into lhs -> shuffle lhs, undef
  // Canonicalize all indices into rhs -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }

  // If this is an identity shuffle, or if all elements shuffle to undef,
  // return that node.
  bool AllUndef = true;
  bool Identity = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
    if (MaskVec[i] >= 0) AllUndef = false;
  }
  if (Identity && NElts == N1.getValueType().getVectorNumElements())
    return N1;
  if (AllUndef)
    return getUNDEF(VT);

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it.  This memory will be "leaked" when
  // the node is deallocated, but recovered when the NodeAllocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));

  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl.getIROrder(),
                                            dl.getDebugLoc(), N1, N2,
                                            MaskAlloc);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConvertRndSat(EVT VT, SDLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
  if (DTy == STy &&
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
    return Val;

  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl.getIROrder(),
                                                           dl.getDebugLoc(),
                                                           Ops, 5, Code);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
  ID.AddInteger(RegNo);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), 0, 0);
  ID.AddPointer(RegMask);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
  ID.AddPointer(Label);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl.getIROrder(),
                                                dl.getDebugLoc(), Root, Label);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}


SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
                                                     TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(V);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MD);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}


/// getShiftAmountOperand - Return the specified value cast to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TM.getTargetLowering()->getShiftAmountTy(LHSTy);
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  return getNode(Opcode, SDLoc(Op), ShTy, Op);
}

/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned StackAlign =
    std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
                            VT2.getStoreSizeInBits())/8;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const TargetLowering *TLI = TM.getTargetLowering();
  const DataLayout *TD = TLI->getDataLayout();
  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
                            TD->getPrefTypeAlignment(Ty2));

  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI->getPointerTy());
}

SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
                                SDValue N2, ISD::CondCode Cond, SDLoc dl) {
  // These setcc operations always fold.
  switch (Cond) {
  default: break;
  case ISD::SETFALSE:
  case ISD::SETFALSE2: return getConstant(0, VT);
  case ISD::SETTRUE:
  case ISD::SETTRUE2:  return getConstant(1, VT);

  case ISD::SETOEQ:
  case ISD::SETOGT:
  case ISD::SETOGE:
  case ISD::SETOLT:
  case ISD::SETOLE:
  case ISD::SETONE:
  case ISD::SETO:
  case ISD::SETUO:
  case ISD::SETUEQ:
  case ISD::SETUNE:
    assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
    break;
  }

  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
    const APInt &C2 = N2C->getAPIntValue();
    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
      const APInt &C1 = N1C->getAPIntValue();

      switch (Cond) {
      default: llvm_unreachable("Unknown integer setcc!");
      case ISD::SETEQ:  return getConstant(C1 == C2, VT);
      case ISD::SETNE:  return getConstant(C1 != C2, VT);
      case ISD::SETULT: return getConstant(C1.ult(C2), VT);
      case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
      case ISD::SETULE: return getConstant(C1.ule(C2), VT);
      case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
      case ISD::SETLT:  return getConstant(C1.slt(C2), VT);
      case ISD::SETGT:  return getConstant(C1.sgt(C2), VT);
      case ISD::SETLE:  return getConstant(C1.sle(C2), VT);
      case ISD::SETGE:  return getConstant(C1.sge(C2), VT);
      }
    }
  }
  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
      APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
      switch (Cond) {
      default: break;
      case ISD::SETEQ:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
      case ISD::SETNE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETLT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
      case ISD::SETGT:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
      case ISD::SETLE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETGE:  if (R==APFloat::cmpUnordered)
                          return getUNDEF(VT);
                        // fall through
      case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETO:   return getConstant(R!=APFloat::cmpUnordered, VT);
      case ISD::SETUO:  return getConstant(R==APFloat::cmpUnordered, VT);
      case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpEqual, VT);
      case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
      case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
                                           R==APFloat::cmpLessThan, VT);
      case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
                                           R==APFloat::cmpUnordered, VT);
      case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
      case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
      }
    } else {
      // Ensure that the constant occurs on the RHS.
      return getSetCC(dl, VT, N2, N1, ISD::getSetCCSwappedOperands(Cond));
    }
  }

  // Could not fold it.
  return SDValue();
}

/// SignBitIsZero - Return true if the sign bit of Op is known to be zero.
/// We use this predicate to simplify operations downstream.
bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
  // This predicate is not safe for vector operations.
  if (Op.getValueType().isVector())
    return false;

  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We
/// use this predicate to simplify operations downstream.  Mask selects the
/// bits that must be known zero in V for this to return true.
bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
                                     unsigned Depth) const {
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}

/// ComputeMaskedBits - Determine which bits of Op are known to be either
/// zero or one and return them in the KnownZero/KnownOne bitsets.
void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, unsigned Depth) const {
  const TargetLowering *TLI = TM.getTargetLowering();
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();

  KnownZero = KnownOne = APInt(BitWidth, 0);   // Don't know anything.
  if (Depth == 6)
    return;  // Limit search depth.

  APInt KnownZero2, KnownOne2;

  switch (Op.getOpcode()) {
  case ISD::Constant:
    // We know all of the bits for a constant!
    KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
    KnownZero = ~KnownOne;
    return;
  case ISD::AND:
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 bits are known to be clear if zero in either the LHS
    // or the RHS.
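    // Worked example (illustrative, not exercised by this file): if the RHS
    // is the constant 0x00FF (KnownZero covers the high byte) and the LHS is
    // known to have its low byte clear (KnownZero2 covers the low byte), the
    // union below proves every bit of the AND result is zero.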
    KnownZero |= KnownZero2;
    return;
  case ISD::OR:
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 bits are known to be set if set in either the LHS
    // or the RHS.
    KnownOne |= KnownOne2;
    return;
  case ISD::XOR: {
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 bits are set where exactly one of the LHS, RHS is set.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case ISD::MUL: {
    ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
    KnownOne.clearAllBits();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
                               KnownZero2.countLeadingOnes(),
                               BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    return;
  }
  case ISD::UDIV: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    return;
  }
  case ISD::SELECT:
    ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
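    // E.g. (select Cond, 0x0F, 0x07): bits 0-2 are one in both arms and so
    // are known one; bits 4 and up are zero in both arms and so are known
    // zero; bit 3 differs between the arms and stays unknown.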
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SELECT_CC:
    ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      return;
    // The boolean result conforms to getBooleanContents.  Fall through.
  case ISD::SETCC:
    // If we know the result of a setcc has the top bits zero, use this info.
    if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
        TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    return;
  case ISD::SHL:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        return;

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShAmt;
      KnownOne  <<= ShAmt;
      // Low bits are known zero.
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
    }
    return;
  case ISD::SRL:
    // (ushr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        return;

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
      KnownZero |= HighBits;  // High bits known zero.
    }
    return;
  case ISD::SRA:
    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned ShAmt = SA->getZExtValue();

      // If the shift count is an invalid immediate, don't do anything.
      if (ShAmt >= BitWidth)
        return;

      // If any of the demanded bits are produced by the sign extension, we
      // also demand the input sign bit.
      APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);

      ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShAmt);
      KnownOne  = KnownOne.lshr(ShAmt);

      // Handle the sign bits.
      APInt SignBit = APInt::getSignBit(BitWidth);
      SignBit = SignBit.lshr(ShAmt);  // Adjust to where it is now in the mask.

      if (KnownZero.intersects(SignBit)) {
        KnownZero |= HighBits;  // New bits are known zero.
      } else if (KnownOne.intersects(SignBit)) {
        KnownOne  |= HighBits;  // New bits are known one.
      }
    }
    return;
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned EBits = EVT.getScalarType().getSizeInBits();

    // Sign extension.  Compute the demanded bits in the result that are not
    // present in the input.
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);

    APInt InSignBit = APInt::getSignBit(EBits);
    APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);

    // If the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    InSignBit = InSignBit.zext(BitWidth);
    if (NewBits.getBoolValue())
      InputDemandedBits |= InSignBit;

    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownOne &= InputDemandedBits;
    KnownZero &= InputDemandedBits;
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero.intersects(InSignBit)) {        // Input sign bit known clear
      KnownZero |= NewBits;
      KnownOne  &= ~NewBits;
    } else if (KnownOne.intersects(InSignBit)) {  // Input sign bit known set
      KnownOne  |= NewBits;
      KnownZero &= ~NewBits;
    } else {                                      // Input sign bit unknown
      KnownZero &= ~NewBits;
      KnownOne  &= ~NewBits;
    }
    return;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTPOP: {
    unsigned LowBits = Log2_32(BitWidth)+1;
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
    KnownOne.clearAllBits();
    return;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If this is a ZEXTLoad and we are looking at the loaded value.
    if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarType().getSizeInBits();
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      computeMaskedBitsLoad(*Ranges, KnownZero);
    }
    return;
  }
  case ISD::ZERO_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
    KnownZero = KnownZero.trunc(InBits);
    KnownOne  = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne  = KnownOne.zext(BitWidth);
    KnownZero |= NewBits;
    return;
  }
  case ISD::SIGN_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    APInt InSignBit = APInt::getSignBit(InBits);
    APInt NewBits   = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);

    KnownZero = KnownZero.trunc(InBits);
    KnownOne  = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

    // Note if the sign bit is known to be zero or one.
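    // For instance (illustrative): sign-extending an i8 known to be 0x0F to
    // i32 makes SignBitKnownZero below true, so all 24 new top bits (and the
    // old sign bit) become known zero.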
    bool SignBitKnownZero = KnownZero.isNegative();
    bool SignBitKnownOne  = KnownOne.isNegative();
    assert(!(SignBitKnownZero && SignBitKnownOne) &&
           "Sign bit can't be known to be both zero and one!");

    KnownZero = KnownZero.zext(BitWidth);
    KnownOne  = KnownOne.zext(BitWidth);

    // If the sign bit is known zero or one, the top bits match.
    if (SignBitKnownZero)
      KnownZero |= NewBits;
    else if (SignBitKnownOne)
      KnownOne  |= NewBits;
    return;
  }
  case ISD::ANY_EXTEND: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    KnownZero = KnownZero.trunc(InBits);
    KnownOne  = KnownOne.trunc(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne  = KnownOne.zext(BitWidth);
    return;
  }
  case ISD::TRUNCATE: {
    EVT InVT = Op.getOperand(0).getValueType();
    unsigned InBits = InVT.getScalarType().getSizeInBits();
    KnownZero = KnownZero.zext(InBits);
    KnownOne  = KnownOne.zext(InBits);
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne  = KnownOne.trunc(BitWidth);
    break;
  }
  case ISD::AssertZext: {
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    KnownZero |= (~InMask);
    KnownOne  &= (~KnownZero);
    return;
  }
  case ISD::FGETSIGN:
    // All bits are zero except the low bit.
    KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    return;

  case ISD::SUB: {
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen).  For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (CLHS->getAPIntValue().isNonNegative()) {
        unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit.
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }
  // fall through
  case ISD::ADD:
  case ISD::ADDE: {
    // Output known-0 bits are the low bits known to be clear in both the
    // LHS and the RHS.  For example, 8+(X<<3) is known to have the low 3
    // bits clear.
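    // The reasoning: a carry cannot be generated below the lowest bit that
    // may be set in either addend, so the sum has at least
    // min(trailing zeros of LHS, trailing zeros of RHS) low zero bits.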
    ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    unsigned KnownZeroOut = KnownZero2.countTrailingOnes();

    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    KnownZeroOut = std::min(KnownZeroOut,
                            KnownZero2.countTrailingOnes());

    if (Op.getOpcode() == ISD::ADD) {
      KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
      return;
    }

    // With ADDE, a carry bit may be added in, so we can only use this
    // information if we know (at least) that the low two bits are clear.
    // We then tell the caller that the low bit is unknown but that other
    // bits are known zero.
    if (KnownZeroOut >= 2) // ADDE
      KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
    return;
  }
  case ISD::SREM:
    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
        ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero,
        // then the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero,
        // then the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }
    return;
  case ISD::UREM: {
    if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      const APInt &RA = Rem->getAPIntValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        KnownZero |= ~LowBits;
        ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
    ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);

    uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    return;
  }
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    if (unsigned Align = InferPtrAlignment(Op)) {
      // The low bits are known zero if the pointer is aligned.
      KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
      return;
    }
    break;

  default:
    if (Op.getOpcode() < ISD::BUILTIN_OP_END)
      break;
    // Fallthrough
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Allow the target to implement this method for its nodes.
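    // (TargetLowering's default computeMaskedBitsForTargetNode is
    // conservative and claims no known bits; targets override it to expose
    // known bits of their custom opcodes.)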
    TLI->computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
    return;
  }
}

/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits.  We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information.  For example, immediately after an "SRA X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
  const TargetLowering *TLI = TM.getTargetLowering();
  EVT VT = Op.getValueType();
  assert(VT.isInteger() && "Invalid VT!");
  unsigned VTBits = VT.getScalarType().getSizeInBits();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  if (Depth == 6)
    return 1;  // Limit search depth.

  switch (Op.getOpcode()) {
  default: break;
  case ISD::AssertSext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp+1;
  case ISD::AssertZext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits-Tmp;

  case ISD::Constant: {
    const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
    return Val.getNumSignBits();
  }

  case ISD::SIGN_EXTEND:
    Tmp =
      VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
    return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;

  case ISD::SIGN_EXTEND_INREG:
    // Max of the input and what this extends.
    Tmp =
      cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
    Tmp = VTBits-Tmp+1;

    Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    return std::max(Tmp, Tmp2);

  case ISD::SRA:
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    // SRA X, C   -> adds C sign bits.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      Tmp += C->getZExtValue();
      if (Tmp > VTBits) Tmp = VTBits;
    }
    return Tmp;
  case ISD::SHL:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (C->getZExtValue() >= VTBits ||      // Bad shift.
          C->getZExtValue() >= Tmp) break;    // Shifted all sign bits out.
      return Tmp - C->getZExtValue();
    }
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer.  Now proceed to the generic code that uses
      // ComputeMaskedBits, and pick whichever answer is better.
    }
    break;

  case ISD::SELECT:
    Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
    return std::min(Tmp, Tmp2);

  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.  Fall through.
  case ISD::SETCC:
    // If setcc returns 0/-1, all bits are sign bits.
    if (TLI->getBooleanContents(Op.getValueType().isVector()) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  case ISD::ROTL:
  case ISD::ROTR:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      unsigned RotAmt = C->getZExtValue() & (VTBits-1);

      // Handle rotate right by N like a rotate left by VTBits-N.
      if (Op.getOpcode() == ISD::ROTR)
        RotAmt = (VTBits-RotAmt) & (VTBits-1);

      // If we aren't rotating out all of the known-in sign bits, return the
      // number that are left.  This handles rotl(sext(x), 1) for example.
      Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
      if (Tmp > RotAmt+1) return Tmp-RotAmt;
    }
    break;
  case ISD::ADD:
    // Add can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero, KnownOne;
        ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is
        // all sign bits set.
        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
          return VTBits;

        // If we are subtracting one from a positive number, there is no
        // carry out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;

  case ISD::SUB:
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero, KnownOne;
        ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is
        // all sign bits set.
        if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
          return VTBits;

        // If the input is known to be positive (the sign bit is known
        // clear), the output of the NEG has the same number of sign bits
        // as the input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;
  case ISD::TRUNCATE:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  }

  // If we are looking at the loaded value of the SDNode.
  if (Op.getResNo() == 0) {
    // Handle LOADX separately here.  EXTLOAD case will fallthrough.
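    // E.g. (illustrative) a sextload of i16 into i32 yields 17 known sign
    // bits (the 16 replicated top bits plus the original sign bit), while a
    // zextload of i16 into i32 yields 16 (the zeroed top half).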
    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
      unsigned ExtType = LD->getExtensionType();
      switch (ExtType) {
      default: break;
      case ISD::SEXTLOAD:    // '17' bits known
        Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
        return VTBits-Tmp+1;
      case ISD::ZEXTLOAD:    // '16' bits known
        Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
        return VTBits-Tmp;
      }
    }
  }

  // Allow the target to implement this method for its nodes.
  if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
      Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
      Op.getOpcode() == ISD::INTRINSIC_VOID) {
    unsigned NumBits = TLI->ComputeNumSignBitsForTargetNode(Op, Depth);
    if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  APInt KnownZero, KnownOne;
  ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);

  APInt Mask;
  if (KnownZero.isNegative()) {        // sign bit is 0
    Mask = KnownZero;
  } else if (KnownOne.isNegative()) {  // sign bit is 1
    Mask = KnownOne;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-VTBits;
  // Return # leading zeros.  We use 'min' here in case Mask was zero before
  // shifting.  We don't want to return '64' as for an i32 "0".
  return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
}

/// isBaseWithConstantOffset - Return true if the specified operand is an
/// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
/// ISD::OR with a ConstantSDNode that is guaranteed to have the same
/// semantics as an ADD.  This handles the equivalence:
///     X|Cst == X+Cst  iff  X&Cst == 0.
bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
  if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
      !isa<ConstantSDNode>(Op.getOperand(1)))
    return false;

  if (Op.getOpcode() == ISD::OR &&
      !MaskedValueIsZero(Op.getOperand(0),
                     cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
    return false;

  return true;
}


bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
  // If we're told that NaNs won't happen, assume they won't.
  if (getTarget().Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->getValueAPF().isNaN();

  // TODO: Recognize more cases here.

  return false;
}

bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
  // If the value is a constant, we can obviously see if it is a zero or not.
  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
    return !C->isZero();

  // TODO: Recognize more cases here.
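  // The switch below currently exploits only one integer fact: OR-ing in a
  // non-zero constant forces at least one result bit to be set.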
  switch (Op.getOpcode()) {
  default: break;
  case ISD::OR:
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      return !C->isNullValue();
    break;
  }

  return false;
}

bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
  // Check the obvious case.
  if (A == B) return true;

  // Check for negative and positive zero.
  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
    if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
      if (CA->isZero() && CB->isZero()) return true;

  // Otherwise they may not be equal.
  return false;
}

/// getNode - Gets or creates the specified node.
///
SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(),
                                         DL.getDebugLoc(), getVTList(VT));
  CSEMap.InsertNode(N, IP);

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
                              EVT VT, SDValue Operand) {
  // Constant fold unary operations with an integer constant operand.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
    const APInt &Val = C->getAPIntValue();
    switch (Opcode) {
    default: break;
    case ISD::SIGN_EXTEND:
      return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT);
    case ISD::ANY_EXTEND:
    case ISD::ZERO_EXTEND:
    case ISD::TRUNCATE:
      return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT);
    case ISD::UINT_TO_FP:
    case ISD::SINT_TO_FP: {
      APFloat apf(EVTToAPFloatSemantics(VT),
                  APInt::getNullValue(VT.getSizeInBits()));
      (void)apf.convertFromAPInt(Val,
                                 Opcode==ISD::SINT_TO_FP,
                                 APFloat::rmNearestTiesToEven);
      return getConstantFP(apf, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
        return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
      else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
        return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
      break;
    case ISD::BSWAP:
      return getConstant(Val.byteSwap(), VT);
    case ISD::CTPOP:
      return getConstant(Val.countPopulation(), VT);
    case ISD::CTLZ:
    case ISD::CTLZ_ZERO_UNDEF:
      return getConstant(Val.countLeadingZeros(), VT);
    case ISD::CTTZ:
    case ISD::CTTZ_ZERO_UNDEF:
      return getConstant(Val.countTrailingZeros(), VT);
    }
  }

  // Constant fold unary operations with a floating point constant operand.
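  // For example, (fneg (constfp 1.0)) folds to -1.0 here, and FCEIL, FTRUNC
  // and FFLOOR fold through APFloat::roundToIntegral when the rounding is
  // exact or merely inexact.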
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
    APFloat V = C->getValueAPF();    // make copy
    switch (Opcode) {
    case ISD::FNEG:
      V.changeSign();
      return getConstantFP(V, VT);
    case ISD::FABS:
      V.clearSign();
      return getConstantFP(V, VT);
    case ISD::FCEIL: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FTRUNC: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FFLOOR: {
      APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
      if (fs == APFloat::opOK || fs == APFloat::opInexact)
        return getConstantFP(V, VT);
      break;
    }
    case ISD::FP_EXTEND: {
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
      (void)V.convert(EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven, &ignored);
      return getConstantFP(V, VT);
    }
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT: {
      integerPart x[2];
      bool ignored;
      assert(integerPartWidth >= 64);
      // FIXME need to be more flexible about rounding mode.
      APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
                                               Opcode==ISD::FP_TO_SINT,
                                               APFloat::rmTowardZero, &ignored);
      if (s==APFloat::opInvalidOp)     // inexact is OK, in fact usual
        break;
      APInt api(VT.getSizeInBits(), x);
      return getConstant(api, VT);
    }
    case ISD::BITCAST:
      if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
        return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
      else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
        return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
      break;
    }
  }

  unsigned OpOpcode = Operand.getNode()->getOpcode();
  switch (Opcode) {
  case ISD::TokenFactor:
  case ISD::MERGE_VALUES:
  case ISD::CONCAT_VECTORS:
    return Operand;         // Factor, merge or concat of one node?  No need.
  case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
  case ISD::FP_EXTEND:
    assert(VT.isFloatingPoint() &&
           Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
    if (Operand.getValueType() == VT) return Operand;  // noop conversion.
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (Operand.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SIGN_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid SIGN_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid sext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // sext(undef) = 0, because the top bits will all be the same.
      return getConstant(0, VT);
    break;
  case ISD::ZERO_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ZERO_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid zext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::ZERO_EXTEND)   // (zext (zext x)) -> (zext x)
      return getNode(ISD::ZERO_EXTEND, DL, VT,
                     Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      // zext(undef) = 0, because the top bits will be zero.
      return getConstant(0, VT);
    break;
  case ISD::ANY_EXTEND:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid ANY_EXTEND!");
    if (Operand.getValueType() == VT) return Operand;   // noop extension
    assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
           "Invalid anyext node, dst < src!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");

    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND)
      // (ext (zext x)) -> (zext x)  and  (ext (sext x)) -> (sext x)
      return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
    else if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);

    // (ext (trunc x)) -> x
    if (OpOpcode == ISD::TRUNCATE) {
      SDValue OpOp = Operand.getNode()->getOperand(0);
      if (OpOp.getValueType() == VT)
        return OpOp;
    }
    break;
  case ISD::TRUNCATE:
    assert(VT.isInteger() && Operand.getValueType().isInteger() &&
           "Invalid TRUNCATE!");
    if (Operand.getValueType() == VT) return Operand;   // noop truncate
    assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
           "Invalid truncate node, src < dst!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() ==
            Operand.getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    if (OpOpcode == ISD::TRUNCATE)
      return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
        OpOpcode == ISD::ANY_EXTEND) {
      // If the source is smaller than the dest, we still need an extend.
      if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
            .bitsLT(VT.getScalarType()))
        return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
      if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
        return getNode(ISD::TRUNCATE, DL, VT,
                       Operand.getNode()->getOperand(0));
      return Operand.getNode()->getOperand(0);
    }
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::BITCAST:
    // Basic sanity checking.
    assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
           && "Cannot BITCAST between types of different sizes!");
    if (VT == Operand.getValueType()) return Operand;  // noop conversion.
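    // Chained bitcasts collapse to a single one: a bitcast never changes
    // the underlying bits, so only the outermost destination type matters.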
    if (OpOpcode == ISD::BITCAST)  // bitconv(bitconv(x)) -> bitconv(x)
      return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    break;
  case ISD::SCALAR_TO_VECTOR:
    assert(VT.isVector() && !Operand.getValueType().isVector() &&
           (VT.getVectorElementType() == Operand.getValueType() ||
            (VT.getVectorElementType().isInteger() &&
             Operand.getValueType().isInteger() &&
             VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
           "Illegal SCALAR_TO_VECTOR node!");
    if (OpOpcode == ISD::UNDEF)
      return getUNDEF(VT);
    // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
    if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
        isa<ConstantSDNode>(Operand.getOperand(1)) &&
        Operand.getConstantOperandVal(1) == 0 &&
        Operand.getOperand(0).getValueType() == VT)
      return Operand.getOperand(0);
    break;
  case ISD::FNEG:
    // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
    if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
      return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
                     Operand.getNode()->getOperand(0));
    if (OpOpcode == ISD::FNEG)  // --X -> X
      return Operand.getNode()->getOperand(0);
    break;
  case ISD::FABS:
    if (OpOpcode == ISD::FNEG)  // abs(-X) -> abs(X)
      return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
    break;
  }

  SDNode *N;
  SDVTList VTs = getVTList(VT);
  if (VT != MVT::Glue) { // Don't CSE flag producing nodes
    FoldingSetNodeID ID;
    SDValue Ops[1] = { Operand };
    AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
    void *IP = 0;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);

    N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs, Operand);
    CSEMap.InsertNode(N, IP);
  } else {
    N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(),
                                        DL.getDebugLoc(), VTs, Operand);
  }

  AllNodes.push_back(N);
#ifndef NDEBUG
  VerifySDNode(N);
#endif
  return SDValue(N, 0);
}

SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
                                             SDNode *Cst1, SDNode *Cst2) {
  SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
  SmallVector<SDValue, 4> Outputs;
  EVT SVT = VT.getScalarType();

  ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
  ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
  if (Scalar1 && Scalar2) {
    // Scalar instruction.
    Inputs.push_back(std::make_pair(Scalar1, Scalar2));
  } else {
    // For vectors extract each constant element into Inputs so we can
    // constant fold them individually.
    BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
    BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
    if (!BV1 || !BV2)
      return SDValue();

    assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");

    for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
      ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
      ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
      if (!V1 || !V2) // Not a constant, bail.
        return SDValue();

      // Avoid BUILD_VECTOR nodes that perform implicit truncation.
      // FIXME: This is valid and could be handled by truncating the APInts.
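      // (A BUILD_VECTOR whose operands are wider than the vector's element
      // type truncates each element implicitly; folding such a pair would
      // require truncating C1/C2 first, so bail out for now.)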
      if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
        return SDValue();

      Inputs.push_back(std::make_pair(V1, V2));
    }
  }

  // We have a number of constant values; constant fold them element by
  // element.
  for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
    const APInt &C1 = Inputs[I].first->getAPIntValue();
    const APInt &C2 = Inputs[I].second->getAPIntValue();

    switch (Opcode) {
    case ISD::ADD:
      Outputs.push_back(getConstant(C1 + C2, SVT));
      break;
    case ISD::SUB:
      Outputs.push_back(getConstant(C1 - C2, SVT));
      break;
    case ISD::MUL:
      Outputs.push_back(getConstant(C1 * C2, SVT));
      break;
    case ISD::UDIV:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.udiv(C2), SVT));
      break;
    case ISD::UREM:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.urem(C2), SVT));
      break;
    case ISD::SDIV:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
      break;
    case ISD::SREM:
      if (!C2.getBoolValue())
        return SDValue();
      Outputs.push_back(getConstant(C1.srem(C2), SVT));
      break;
    case ISD::AND:
      Outputs.push_back(getConstant(C1 & C2, SVT));
      break;
    case ISD::OR:
      Outputs.push_back(getConstant(C1 | C2, SVT));
      break;
    case ISD::XOR:
      Outputs.push_back(getConstant(C1 ^ C2, SVT));
      break;
    case ISD::SHL:
      Outputs.push_back(getConstant(C1 << C2, SVT));
      break;
    case ISD::SRL:
      Outputs.push_back(getConstant(C1.lshr(C2), SVT));
      break;
    case ISD::SRA:
      Outputs.push_back(getConstant(C1.ashr(C2), SVT));
      break;
    case ISD::ROTL:
      Outputs.push_back(getConstant(C1.rotl(C2), SVT));
      break;
    case ISD::ROTR:
      Outputs.push_back(getConstant(C1.rotr(C2), SVT));
      break;
    default:
      return SDValue();
    }
  }

  // Handle the scalar case first.
  if (Scalar1 && Scalar2)
    return Outputs.back();

  // Otherwise build a big vector out of the scalar elements we generated.
  return getNode(ISD::BUILD_VECTOR, SDLoc(), VT, Outputs.data(),
                 Outputs.size());
}

SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1,
                              SDValue N2) {
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
  switch (Opcode) {
  default: break;
  case ISD::TokenFactor:
    assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
           N2.getValueType() == MVT::Other && "Invalid token factor!");
    // Fold trivial token factors.
    if (N1.getOpcode() == ISD::EntryToken) return N2;
    if (N2.getOpcode() == ISD::EntryToken) return N1;
    if (N1 == N2) return N1;
    break;
  case ISD::CONCAT_VECTORS:
    // Concat of UNDEFs is UNDEF.
    if (N1.getOpcode() == ISD::UNDEF &&
        N2.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);

    // A CONCAT_VECTORS with all operands BUILD_VECTOR can be simplified to
    // one big BUILD_VECTOR.
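    // E.g. (concat_vectors (build_vector a, b), (build_vector c, d))
    //        -> (build_vector a, b, c, d)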
    if (N1.getOpcode() == ISD::BUILD_VECTOR &&
        N2.getOpcode() == ISD::BUILD_VECTOR) {
      SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
                                    N1.getNode()->op_end());
      Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
      return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
    }
    break;
  case ISD::AND:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X & 0) -> 0.  This commonly occurs when legalizing i64 values, so
    // it's worth handling here.
    if (N2C && N2C->isNullValue())
      return N2;
    if (N2C && N2C->isAllOnesValue())  // X & -1 -> X
      return N1;
    break;
  case ISD::OR:
  case ISD::XOR:
  case ISD::ADD:
  case ISD::SUB:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    // (X ^|+- 0) -> X.  This commonly occurs when legalizing i64 values, so
    // it's worth handling here.
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::UDIV:
  case ISD::UREM:
  case ISD::MULHU:
  case ISD::MULHS:
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::SREM:
    assert(VT.isInteger() && "This operator does not apply to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
    if (getTarget().Options.UnsafeFPMath) {
      if (Opcode == ISD::FADD) {
        // 0+x --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
          if (CFP->getValueAPF().isZero())
            return N2;
        // x+0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FSUB) {
        // x-0 --> x
        if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
          if (CFP->getValueAPF().isZero())
            return N1;
      } else if (Opcode == ISD::FMUL) {
        ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
        SDValue V = N2;

        // If the first operand isn't the constant, try the second.
        if (!CFP) {
          CFP = dyn_cast<ConstantFPSDNode>(N2);
          V = N1;
        }

        if (CFP) {
          // 0*x --> 0
          if (CFP->isZero())
            return SDValue(CFP,0);
          // 1*x --> x
          if (CFP->isExactlyValue(1.0))
            return V;
        }
      }
    }
    assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
    assert(N1.getValueType() == N2.getValueType() &&
           N1.getValueType() == VT && "Binary operator types must match!");
    break;
  case ISD::FCOPYSIGN:   // N1 and result must match.  N1/N2 need not match.
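    // (FCOPYSIGN takes its magnitude from N1 and its sign from N2, which is
    // why N2 may have a different floating-point type than N1 and the
    // result.)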
    assert(N1.getValueType() == VT &&
           N1.getValueType().isFloatingPoint() &&
           N2.getValueType().isFloatingPoint() &&
           "Invalid FCOPYSIGN!");
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    assert(VT == N1.getValueType() &&
           "Shift operators' return type must be the same as their first arg");
    assert(VT.isInteger() && N2.getValueType().isInteger() &&
           "Shifts only work on integers");
    assert((!VT.isVector() || VT == N2.getValueType()) &&
           "Vector shift amounts must have the same type as their first arg");
    // Verify that the shift amount VT is big enough to hold valid shift
    // amounts.  This catches things like trying to shift an i1024 value by
    // an i8, which is easy to fall into in generic code that uses
    // TLI.getShiftAmount().
    assert(N2.getValueType().getSizeInBits() >=
           Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
           "Invalid use of small shift amount with oversized value!");

    // Always fold shifts of i1 values so the code generator doesn't need to
    // handle them.  Since we know the size of the shift has to be less than
    // the size of the value, the shift/rotate count is guaranteed to be zero.
    if (VT == MVT::i1)
      return N1;
    if (N2C && N2C->isNullValue())
      return N1;
    break;
  case ISD::FP_ROUND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg round!");
    assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
           "Cannot FP_ROUND_INREG integer types");
    assert(EVT.isVector() == VT.isVector() &&
           "FP_ROUND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in FP_ROUND_INREG");
    assert(EVT.bitsLE(VT) && "Not rounding down!");
    (void)EVT;
    if (cast<VTSDNode>(N2)->getVT() == VT) return N1;  // Not actually rounding.
    break;
  }
  case ISD::FP_ROUND:
    assert(VT.isFloatingPoint() &&
           N1.getValueType().isFloatingPoint() &&
           VT.bitsLE(N1.getValueType()) &&
           isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
    if (N1.getValueType() == VT) return N1;  // noop conversion.
    break;
  case ISD::AssertSext:
  case ISD::AssertZext: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(!EVT.isVector() &&
           "AssertSExt/AssertZExt type should be the vector element type "
           "rather than the vector type!");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (VT == EVT) return N1; // noop assertion.
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    EVT EVT = cast<VTSDNode>(N2)->getVT();
    assert(VT == N1.getValueType() && "Not an inreg extend!");
    assert(VT.isInteger() && EVT.isInteger() &&
           "Cannot *_EXTEND_INREG FP types");
    assert(EVT.isVector() == VT.isVector() &&
           "SIGN_EXTEND_INREG type should be vector iff the operand "
           "type is vector!");
    assert((!EVT.isVector() ||
            EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
           "Vector element counts must match in SIGN_EXTEND_INREG");
    assert(EVT.bitsLE(VT) && "Not extending!");
    if (EVT == VT) return N1;  // Not actually extending.

    if (N1C) {
      APInt Val = N1C->getAPIntValue();
      unsigned FromBits = EVT.getScalarType().getSizeInBits();
      Val <<= Val.getBitWidth()-FromBits;
      Val = Val.ashr(Val.getBitWidth()-FromBits);
      return getConstant(Val, VT);
    }
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
    if (N1.getOpcode() == ISD::UNDEF)
      return getUNDEF(VT);

    // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
    // expanding copies of large vectors from registers.
    if (N2C &&
        N1.getOpcode() == ISD::CONCAT_VECTORS &&
        N1.getNumOperands() > 0) {
      unsigned Factor =
        N1.getOperand(0).getValueType().getVectorNumElements();
      return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                     N1.getOperand(N2C->getZExtValue() / Factor),
                     getConstant(N2C->getZExtValue() % Factor,
                                 N2.getValueType()));
    }

    // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
    // expanding large vector constants.
    if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
      SDValue Elt = N1.getOperand(N2C->getZExtValue());

      if (VT != Elt.getValueType())
        // If the vector element type is not legal, the BUILD_VECTOR operands
        // are promoted and implicitly truncated, and the result implicitly
        // extended.  Make that explicit here.
        Elt = getAnyExtOrTrunc(Elt, DL, VT);

      return Elt;
    }

    // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
    // operations are lowered to scalars.
    if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
      // If the indices are the same, return the inserted element.
      // Otherwise, if the indices are known to be different, extract the
      // element from the original vector.
      SDValue N1Op2 = N1.getOperand(2);
      ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());

      if (N1Op2C && N2C) {
        if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
          if (VT == N1.getOperand(1).getValueType())
            return N1.getOperand(1);
          else
            return getSExtOrTrunc(N1.getOperand(1), DL, VT);
        }

        return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
      }
    }
    break;
  case ISD::EXTRACT_ELEMENT:
    assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
    assert(!N1.getValueType().isVector() && !VT.isVector() &&
           (N1.getValueType().isInteger() == VT.isInteger()) &&
           N1.getValueType() != VT &&
           "Wrong types for EXTRACT_ELEMENT!");

    // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is
    // expanding 64-bit integers into 32-bit parts.  Instead of building the
    // extract of the BUILD_PAIR, only to have legalize rip it apart, just
    // do it now.
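    // E.g. (extract_element (build_pair lo, hi), 1) -> hi; index 0 selects
    // the low half and index 1 the high half.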
    if (N1.getOpcode() == ISD::BUILD_PAIR)
      return N1.getOperand(N2C->getZExtValue());

    // EXTRACT_ELEMENT of a constant int is also very common.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
      unsigned ElementSize = VT.getSizeInBits();
      unsigned Shift = ElementSize * N2C->getZExtValue();
      APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
      return getConstant(ShiftedVal.trunc(ElementSize), VT);
    }
    break;
  case ISD::EXTRACT_SUBVECTOR: {
    SDValue Index = N2;
    if (VT.isSimple() && N1.getValueType().isSimple()) {
      assert(VT.isVector() && N1.getValueType().isVector() &&
             "Extract subvector VTs must be vectors!");
      assert(VT.getVectorElementType() ==
             N1.getValueType().getVectorElementType() &&
             "Extract subvector VTs must have the same element type!");
      assert(VT.getSimpleVT() <= N1.getValueType().getSimpleVT() &&
             "Extract subvector must be from larger vector to smaller vector!");

      if (isa<ConstantSDNode>(Index.getNode())) {
        assert((VT.getVectorNumElements() +
                cast<ConstantSDNode>(Index.getNode())->getZExtValue()
                <= N1.getValueType().getVectorNumElements())
               && "Extract subvector overflow!");
      }

      // Trivial extraction.
      if (VT.getSimpleVT() == N1.getValueType().getSimpleVT())
        return N1;
    }
    break;
  }
  }

  // Perform trivial constant folding.
  SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
  if (SV.getNode()) return SV;

  // Canonicalize constant to RHS if commutative.
  if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
    std::swap(N1C, N2C);
    std::swap(N1, N2);
  }

  // Constant fold FP operations.
  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
  ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
  if (N1CFP) {
    if (!N2CFP && isCommutativeBinOp(Opcode)) {
      // Canonicalize constant to RHS if commutative.
      std::swap(N1CFP, N2CFP);
      std::swap(N1, N2);
    } else if (N2CFP) {
      APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
      APFloat::opStatus s;
      switch (Opcode) {
      case ISD::FADD:
        s = V1.add(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FSUB:
        s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FMUL:
        s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp)
          return getConstantFP(V1, VT);
        break;
      case ISD::FDIV:
        s = V1.divide(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp && s != APFloat::opDivByZero)
          return getConstantFP(V1, VT);
        break;
      case ISD::FREM:
        s = V1.mod(V2, APFloat::rmNearestTiesToEven);
        if (s != APFloat::opInvalidOp && s != APFloat::opDivByZero)
          return getConstantFP(V1, VT);
        break;
      case ISD::FCOPYSIGN:
        V1.copySign(V2);
        return getConstantFP(V1, VT);
      default: break;
      }
    }

    if (Opcode == ISD::FP_ROUND) {
      APFloat V = N1CFP->getValueAPF();    // make copy
      bool ignored;
      // This can return overflow, underflow, or inexact; we don't care.
      // FIXME need to be more flexible about rounding mode.
3155       (void)V.convert(EVTToAPFloatSemantics(VT),
3156                       APFloat::rmNearestTiesToEven, &ignored);
3157       return getConstantFP(V, VT);
3158     }
3159   }
3160
3161   // Canonicalize an UNDEF to the RHS, even over a constant.
3162   if (N1.getOpcode() == ISD::UNDEF) {
3163     if (isCommutativeBinOp(Opcode)) {
3164       std::swap(N1, N2);
3165     } else {
3166       switch (Opcode) {
3167       case ISD::FP_ROUND_INREG:
3168       case ISD::SIGN_EXTEND_INREG:
3169       case ISD::SUB:
3170       case ISD::FSUB:
3171       case ISD::FDIV:
3172       case ISD::FREM:
3173       case ISD::SRA:
3174         return N1;     // fold op(undef, arg2) -> undef
3175       case ISD::UDIV:
3176       case ISD::SDIV:
3177       case ISD::UREM:
3178       case ISD::SREM:
3179       case ISD::SRL:
3180       case ISD::SHL:
3181         if (!VT.isVector())
3182           return getConstant(0, VT);    // fold op(undef, arg2) -> 0
3183         // For vectors, we can't easily build an all zero vector, just return
3184         // the RHS.
3185         return N2;
3186       }
3187     }
3188   }
3189
3190   // Fold a bunch of operators when the RHS is undef.
3191   if (N2.getOpcode() == ISD::UNDEF) {
3192     switch (Opcode) {
3193     case ISD::XOR:
3194       if (N1.getOpcode() == ISD::UNDEF)
3195         // Handle undef ^ undef -> 0 special case. This is a common
3196         // idiom (misuse).
3197         return getConstant(0, VT);
3198       // fallthrough
3199     case ISD::ADD:
3200     case ISD::ADDC:
3201     case ISD::ADDE:
3202     case ISD::SUB:
3203     case ISD::UDIV:
3204     case ISD::SDIV:
3205     case ISD::UREM:
3206     case ISD::SREM:
3207       return N2;       // fold op(arg1, undef) -> undef
3208     case ISD::FADD:
3209     case ISD::FSUB:
3210     case ISD::FMUL:
3211     case ISD::FDIV:
3212     case ISD::FREM:
3213       if (getTarget().Options.UnsafeFPMath)
3214         return N2;
3215       break;
3216     case ISD::MUL:
3217     case ISD::AND:
3218     case ISD::SRL:
3219     case ISD::SHL:
3220       if (!VT.isVector())
3221         return getConstant(0, VT);    // fold op(arg1, undef) -> 0
3222       // For vectors, we can't easily build an all zero vector, just return
3223       // the LHS.
3224       return N1;
3225     case ISD::OR:
3226       if (!VT.isVector())
3227         return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
3228       // For vectors, we can't easily build an all one vector, just return
3229       // the LHS.
3230       return N1;
3231     case ISD::SRA:
3232       return N1;
3233     }
3234   }
3235
3236   // Memoize this node if possible.
3237   SDNode *N;
3238   SDVTList VTs = getVTList(VT);
3239   if (VT != MVT::Glue) {
3240     SDValue Ops[] = { N1, N2 };
3241     FoldingSetNodeID ID;
3242     AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
3243     void *IP = 0;
3244     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3245       return SDValue(E, 0);
3246
3247     N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
3248     CSEMap.InsertNode(N, IP);
3249   } else {
3250     N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2);
3251   }
3252
3253   AllNodes.push_back(N);
3254 #ifndef NDEBUG
3255   VerifySDNode(N);
3256 #endif
3257   return SDValue(N, 0);
3258 }
3259
3260 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
3261                               SDValue N1, SDValue N2, SDValue N3) {
3262   // Perform various simplifications.
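// For example, the FMA case below folds (fma 2.0, 3.0, 0.5) to the constant
// 6.5, evaluated in the target's format via APFloat::fusedMultiplyAdd.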
3263   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
3264   switch (Opcode) {
3265   case ISD::FMA: {
3266     ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
3267     ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
3268     ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
3269     if (N1CFP && N2CFP && N3CFP) {
3270       APFloat V1 = N1CFP->getValueAPF();
3271       const APFloat &V2 = N2CFP->getValueAPF();
3272       const APFloat &V3 = N3CFP->getValueAPF();
3273       APFloat::opStatus s =
3274         V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
3275       if (s != APFloat::opInvalidOp)
3276         return getConstantFP(V1, VT);
3277     }
3278     break;
3279   }
3280   case ISD::CONCAT_VECTORS:
3281     // A CONCAT_VECTORS whose operands are all BUILD_VECTORs can be
3282     // simplified to one big BUILD_VECTOR.
3283     if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3284         N2.getOpcode() == ISD::BUILD_VECTOR &&
3285         N3.getOpcode() == ISD::BUILD_VECTOR) {
3286       SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3287                                     N1.getNode()->op_end());
3288       Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3289       Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
3290       return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
3291     }
3292     break;
3293   case ISD::SETCC: {
3294     // Use FoldSetCC to simplify SETCC nodes.
3295     SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
3296     if (Simp.getNode()) return Simp;
3297     break;
3298   }
3299   case ISD::SELECT:
3300     if (N1C) {
3301       if (N1C->getZExtValue())
3302         return N2;             // select true, X, Y -> X
3303       return N3;               // select false, X, Y -> Y
3304     }
3305
3306     if (N2 == N3) return N2;   // select C, X, X -> X
3307     break;
3308   case ISD::VECTOR_SHUFFLE:
3309     llvm_unreachable("should use getVectorShuffle constructor!");
3310   case ISD::INSERT_SUBVECTOR: {
3311     SDValue Index = N3;
3312     if (VT.isSimple() && N1.getValueType().isSimple()
3313         && N2.getValueType().isSimple()) {
3314       assert(VT.isVector() && N1.getValueType().isVector() &&
3315              N2.getValueType().isVector() &&
3316              "Insert subvector VTs must be vectors!");
3317       assert(VT == N1.getValueType() &&
3318              "Dest and insert subvector source types must match!");
3319       assert(N2.getValueType().getSimpleVT() <= N1.getValueType().getSimpleVT() &&
3320              "Insert subvector must be from smaller vector to larger vector!");
3321       if (isa<ConstantSDNode>(Index.getNode())) {
3322         assert((N2.getValueType().getVectorNumElements() +
3323                 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3324                 <= VT.getVectorNumElements())
3325                && "Insert subvector overflow!");
3326       }
3327
3328       // Trivial insertion.
3329       if (VT.getSimpleVT() == N2.getValueType().getSimpleVT())
3330         return N2;
3331     }
3332     break;
3333   }
3334   case ISD::BITCAST:
3335     // Fold bit_convert nodes from a type to themselves.
3336     if (N1.getValueType() == VT)
3337       return N1;
3338     break;
3339   }
3340
3341   // Memoize node if it doesn't produce a flag.
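// The CSE contract assumed here (illustrative sketch): two requests with the
// same opcode, value types and operands return the same node, e.g.
//   SDValue A = DAG.getNode(ISD::ADD, DL, VT, X, Y);
//   SDValue B = DAG.getNode(ISD::ADD, DL, VT, X, Y);  // same SDNode as A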
3342 SDNode *N; 3343 SDVTList VTs = getVTList(VT); 3344 if (VT != MVT::Glue) { 3345 SDValue Ops[] = { N1, N2, N3 }; 3346 FoldingSetNodeID ID; 3347 AddNodeIDNode(ID, Opcode, VTs, Ops, 3); 3348 void *IP = 0; 3349 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 3350 return SDValue(E, 0); 3351 3352 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2, N3); 3353 CSEMap.InsertNode(N, IP); 3354 } else { 3355 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, N1, N2, N3); 3356 } 3357 3358 AllNodes.push_back(N); 3359 #ifndef NDEBUG 3360 VerifySDNode(N); 3361 #endif 3362 return SDValue(N, 0); 3363 } 3364 3365 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, 3366 SDValue N1, SDValue N2, SDValue N3, 3367 SDValue N4) { 3368 SDValue Ops[] = { N1, N2, N3, N4 }; 3369 return getNode(Opcode, DL, VT, Ops, 4); 3370 } 3371 3372 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT, 3373 SDValue N1, SDValue N2, SDValue N3, 3374 SDValue N4, SDValue N5) { 3375 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 3376 return getNode(Opcode, DL, VT, Ops, 5); 3377 } 3378 3379 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all 3380 /// the incoming stack arguments to be loaded from the stack. 3381 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) { 3382 SmallVector<SDValue, 8> ArgChains; 3383 3384 // Include the original chain at the beginning of the list. When this is 3385 // used by target LowerCall hooks, this helps legalize find the 3386 // CALLSEQ_BEGIN node. 3387 ArgChains.push_back(Chain); 3388 3389 // Add a chain value for each stack argument. 3390 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(), 3391 UE = getEntryNode().getNode()->use_end(); U != UE; ++U) 3392 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) 3393 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) 3394 if (FI->getIndex() < 0) 3395 ArgChains.push_back(SDValue(L, 1)); 3396 3397 // Build a tokenfactor for all the chains. 3398 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, 3399 &ArgChains[0], ArgChains.size()); 3400 } 3401 3402 /// getMemsetValue - Vectorized representation of the memset value 3403 /// operand. 3404 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG, 3405 SDLoc dl) { 3406 assert(Value.getOpcode() != ISD::UNDEF); 3407 3408 unsigned NumBits = VT.getScalarType().getSizeInBits(); 3409 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { 3410 assert(C->getAPIntValue().getBitWidth() == 8); 3411 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue()); 3412 if (VT.isInteger()) 3413 return DAG.getConstant(Val, VT); 3414 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT); 3415 } 3416 3417 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value); 3418 if (NumBits > 8) { 3419 // Use a multiplication with 0x010101... to extend the input to the 3420 // required length. 3421 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01)); 3422 Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT)); 3423 } 3424 3425 return Value; 3426 } 3427 3428 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only 3429 /// used when a memcpy is turned into a memset when the source is a constant 3430 /// string ptr. 3431 static SDValue getMemsetStringVal(EVT VT, SDLoc dl, SelectionDAG &DAG, 3432 const TargetLowering &TLI, StringRef Str) { 3433 // Handle vector with all elements zero. 
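// For example, a memcpy whose source is an all-zero constant (so Str is
// empty here) with VT = v2f32 produces
//   (v2f32 bitcast (v2i32 build_vector 0, 0)).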
3434   if (Str.empty()) {
3435     if (VT.isInteger())
3436       return DAG.getConstant(0, VT);
3437     else if (VT == MVT::f32 || VT == MVT::f64)
3438       return DAG.getConstantFP(0.0, VT);
3439     else if (VT.isVector()) {
3440       unsigned NumElts = VT.getVectorNumElements();
3441       MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3442       return DAG.getNode(ISD::BITCAST, dl, VT,
3443                          DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3444                                                              EltVT, NumElts)));
3445     } else
3446       llvm_unreachable("Expected type!");
3447   }
3448
3449   assert(!VT.isVector() && "Can't handle vector type here!");
3450   unsigned NumVTBits = VT.getSizeInBits();
3451   unsigned NumVTBytes = NumVTBits / 8;
3452   unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3453
3454   APInt Val(NumVTBits, 0);
3455   if (TLI.isLittleEndian()) {
3456     for (unsigned i = 0; i != NumBytes; ++i)
3457       Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3458   } else {
3459     for (unsigned i = 0; i != NumBytes; ++i)
3460       Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
3461   }
3462
3463   // If the "cost" of materializing the integer immediate is free or 1, it is
3464   // cost-effective to turn the load into the immediate.
3465   const TargetTransformInfo *TTI = DAG.getTargetTransformInfo();
3466   if (TTI->getIntImmCost(Val, VT.getTypeForEVT(*DAG.getContext())) < 2)
3467     return DAG.getConstant(Val, VT);
3468   return SDValue(0, 0);
3469 }
3470
3471 /// getMemBasePlusOffset - Returns an ADD node that computes the given Base
3472 /// pointer plus the constant Offset, for use as a memory operand address.
3473 static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SDLoc dl,
3474                                     SelectionDAG &DAG) {
3475   EVT VT = Base.getValueType();
3476   return DAG.getNode(ISD::ADD, dl,
3477                      VT, Base, DAG.getConstant(Offset, VT));
3478 }
3479
3480 /// isMemSrcFromString - Returns true if memcpy source is a string constant.
3481 ///
3482 static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3483   unsigned SrcDelta = 0;
3484   GlobalAddressSDNode *G = NULL;
3485   if (Src.getOpcode() == ISD::GlobalAddress)
3486     G = cast<GlobalAddressSDNode>(Src);
3487   else if (Src.getOpcode() == ISD::ADD &&
3488            Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3489            Src.getOperand(1).getOpcode() == ISD::Constant) {
3490     G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3491     SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3492   }
3493   if (!G)
3494     return false;
3495
3496   return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
3497 }
3498
3499 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
3500 /// to replace the memset / memcpy. Returns true if the number of memory ops
3501 /// is below the threshold. It returns the types of the sequence of
3502 /// memory ops to perform memset / memcpy by reference.
3503 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3504                                      unsigned Limit, uint64_t Size,
3505                                      unsigned DstAlign, unsigned SrcAlign,
3506                                      bool IsMemset,
3507                                      bool ZeroMemset,
3508                                      bool MemcpyStrSrc,
3509                                      bool AllowOverlap,
3510                                      SelectionDAG &DAG,
3511                                      const TargetLowering &TLI) {
3512   assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3513          "Expecting memcpy / memset source to meet alignment requirement!");
3514   // If 'SrcAlign' is zero, that means the memory operation does not need to
3515   // load the value, i.e. memset or memcpy from constant string. Otherwise,
3516   // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3517   // is the specified alignment of the memory operation.
If it is zero, that 3518 // means it's possible to change the alignment of the destination. 3519 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does 3520 // not need to be loaded. 3521 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign, 3522 IsMemset, ZeroMemset, MemcpyStrSrc, 3523 DAG.getMachineFunction()); 3524 3525 if (VT == MVT::Other) { 3526 if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() || 3527 TLI.allowsUnalignedMemoryAccesses(VT)) { 3528 VT = TLI.getPointerTy(); 3529 } else { 3530 switch (DstAlign & 7) { 3531 case 0: VT = MVT::i64; break; 3532 case 4: VT = MVT::i32; break; 3533 case 2: VT = MVT::i16; break; 3534 default: VT = MVT::i8; break; 3535 } 3536 } 3537 3538 MVT LVT = MVT::i64; 3539 while (!TLI.isTypeLegal(LVT)) 3540 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1); 3541 assert(LVT.isInteger()); 3542 3543 if (VT.bitsGT(LVT)) 3544 VT = LVT; 3545 } 3546 3547 unsigned NumMemOps = 0; 3548 while (Size != 0) { 3549 unsigned VTSize = VT.getSizeInBits() / 8; 3550 while (VTSize > Size) { 3551 // For now, only use non-vector load / store's for the left-over pieces. 3552 EVT NewVT = VT; 3553 unsigned NewVTSize; 3554 3555 bool Found = false; 3556 if (VT.isVector() || VT.isFloatingPoint()) { 3557 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32; 3558 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) && 3559 TLI.isSafeMemOpType(NewVT.getSimpleVT())) 3560 Found = true; 3561 else if (NewVT == MVT::i64 && 3562 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) && 3563 TLI.isSafeMemOpType(MVT::f64)) { 3564 // i64 is usually not legal on 32-bit targets, but f64 may be. 3565 NewVT = MVT::f64; 3566 Found = true; 3567 } 3568 } 3569 3570 if (!Found) { 3571 do { 3572 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1); 3573 if (NewVT == MVT::i8) 3574 break; 3575 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT())); 3576 } 3577 NewVTSize = NewVT.getSizeInBits() / 8; 3578 3579 // If the new VT cannot cover all of the remaining bits, then consider 3580 // issuing a (or a pair of) unaligned and overlapping load / store. 3581 // FIXME: Only does this for 64-bit or more since we don't have proper 3582 // cost model for unaligned load / store. 3583 bool Fast; 3584 if (NumMemOps && AllowOverlap && 3585 VTSize >= 8 && NewVTSize < Size && 3586 TLI.allowsUnalignedMemoryAccesses(VT, &Fast) && Fast) 3587 VTSize = Size; 3588 else { 3589 VT = NewVT; 3590 VTSize = NewVTSize; 3591 } 3592 } 3593 3594 if (++NumMemOps > Limit) 3595 return false; 3596 3597 MemOps.push_back(VT); 3598 Size -= VTSize; 3599 } 3600 3601 return true; 3602 } 3603 3604 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl, 3605 SDValue Chain, SDValue Dst, 3606 SDValue Src, uint64_t Size, 3607 unsigned Align, bool isVol, 3608 bool AlwaysInline, 3609 MachinePointerInfo DstPtrInfo, 3610 MachinePointerInfo SrcPtrInfo) { 3611 // Turn a memcpy of undef to nop. 3612 if (Src.getOpcode() == ISD::UNDEF) 3613 return Chain; 3614 3615 // Expand memcpy to a series of load and store ops if the size operand falls 3616 // below a certain threshold. 3617 // TODO: In the AlwaysInline case, if the size is big then generate a loop 3618 // rather than maybe a humongous number of loads and stores. 3619 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3620 std::vector<EVT> MemOps; 3621 bool DstAlignCanChange = false; 3622 MachineFunction &MF = DAG.getMachineFunction(); 3623 MachineFrameInfo *MFI = MF.getFrameInfo(); 3624 bool OptSize = 3625 MF.getFunction()->getAttributes(). 
3626 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); 3627 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 3628 if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) 3629 DstAlignCanChange = true; 3630 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 3631 if (Align > SrcAlign) 3632 SrcAlign = Align; 3633 StringRef Str; 3634 bool CopyFromStr = isMemSrcFromString(Src, Str); 3635 bool isZeroStr = CopyFromStr && Str.empty(); 3636 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize); 3637 3638 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 3639 (DstAlignCanChange ? 0 : Align), 3640 (isZeroStr ? 0 : SrcAlign), 3641 false, false, CopyFromStr, true, DAG, TLI)) 3642 return SDValue(); 3643 3644 if (DstAlignCanChange) { 3645 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 3646 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty); 3647 3648 // Don't promote to an alignment that would require dynamic stack 3649 // realignment. 3650 const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo(); 3651 if (!TRI->needsStackRealignment(MF)) 3652 while (NewAlign > Align && 3653 TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign)) 3654 NewAlign /= 2; 3655 3656 if (NewAlign > Align) { 3657 // Give the stack frame object a larger alignment if needed. 3658 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) 3659 MFI->setObjectAlignment(FI->getIndex(), NewAlign); 3660 Align = NewAlign; 3661 } 3662 } 3663 3664 SmallVector<SDValue, 8> OutChains; 3665 unsigned NumMemOps = MemOps.size(); 3666 uint64_t SrcOff = 0, DstOff = 0; 3667 for (unsigned i = 0; i != NumMemOps; ++i) { 3668 EVT VT = MemOps[i]; 3669 unsigned VTSize = VT.getSizeInBits() / 8; 3670 SDValue Value, Store; 3671 3672 if (VTSize > Size) { 3673 // Issuing an unaligned load / store pair that overlaps with the previous 3674 // pair. Adjust the offset accordingly. 3675 assert(i == NumMemOps-1 && i != 0); 3676 SrcOff -= VTSize - Size; 3677 DstOff -= VTSize - Size; 3678 } 3679 3680 if (CopyFromStr && 3681 (isZeroStr || (VT.isInteger() && !VT.isVector()))) { 3682 // It's unlikely a store of a vector immediate can be done in a single 3683 // instruction. It would require a load from a constantpool first. 3684 // We only handle zero vectors here. 3685 // FIXME: Handle other cases where store of vector immediate is done in 3686 // a single instruction. 3687 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff)); 3688 if (Value.getNode()) 3689 Store = DAG.getStore(Chain, dl, Value, 3690 getMemBasePlusOffset(Dst, DstOff, dl, DAG), 3691 DstPtrInfo.getWithOffset(DstOff), isVol, 3692 false, Align); 3693 } 3694 3695 if (!Store.getNode()) { 3696 // The type might not be legal for the target. This should only happen 3697 // if the type is smaller than a legal type, as on PPC, so the right 3698 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify 3699 // to Load/Store if NVT==VT. 3700 // FIXME does the case above also need this? 
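// For example, with VT = i8 on a target that promotes i8 to i32, NVT below
// is i32 and we emit (i32 extload i8) followed by (truncstore i32 -> i8).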
3701 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 3702 assert(NVT.bitsGE(VT)); 3703 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain, 3704 getMemBasePlusOffset(Src, SrcOff, dl, DAG), 3705 SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false, 3706 MinAlign(SrcAlign, SrcOff)); 3707 Store = DAG.getTruncStore(Chain, dl, Value, 3708 getMemBasePlusOffset(Dst, DstOff, dl, DAG), 3709 DstPtrInfo.getWithOffset(DstOff), VT, isVol, 3710 false, Align); 3711 } 3712 OutChains.push_back(Store); 3713 SrcOff += VTSize; 3714 DstOff += VTSize; 3715 Size -= VTSize; 3716 } 3717 3718 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3719 &OutChains[0], OutChains.size()); 3720 } 3721 3722 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl, 3723 SDValue Chain, SDValue Dst, 3724 SDValue Src, uint64_t Size, 3725 unsigned Align, bool isVol, 3726 bool AlwaysInline, 3727 MachinePointerInfo DstPtrInfo, 3728 MachinePointerInfo SrcPtrInfo) { 3729 // Turn a memmove of undef to nop. 3730 if (Src.getOpcode() == ISD::UNDEF) 3731 return Chain; 3732 3733 // Expand memmove to a series of load and store ops if the size operand falls 3734 // below a certain threshold. 3735 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3736 std::vector<EVT> MemOps; 3737 bool DstAlignCanChange = false; 3738 MachineFunction &MF = DAG.getMachineFunction(); 3739 MachineFrameInfo *MFI = MF.getFrameInfo(); 3740 bool OptSize = MF.getFunction()->getAttributes(). 3741 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); 3742 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 3743 if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) 3744 DstAlignCanChange = true; 3745 unsigned SrcAlign = DAG.InferPtrAlignment(Src); 3746 if (Align > SrcAlign) 3747 SrcAlign = Align; 3748 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize); 3749 3750 if (!FindOptimalMemOpLowering(MemOps, Limit, Size, 3751 (DstAlignCanChange ? 0 : Align), SrcAlign, 3752 false, false, false, false, DAG, TLI)) 3753 return SDValue(); 3754 3755 if (DstAlignCanChange) { 3756 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 3757 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty); 3758 if (NewAlign > Align) { 3759 // Give the stack frame object a larger alignment if needed. 
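// For example, an 8-byte memmove into an i8 array alloca with alignment 1
// may raise the object's alignment to 8 here, so aligned i64 ops can be used.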
3760 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) 3761 MFI->setObjectAlignment(FI->getIndex(), NewAlign); 3762 Align = NewAlign; 3763 } 3764 } 3765 3766 uint64_t SrcOff = 0, DstOff = 0; 3767 SmallVector<SDValue, 8> LoadValues; 3768 SmallVector<SDValue, 8> LoadChains; 3769 SmallVector<SDValue, 8> OutChains; 3770 unsigned NumMemOps = MemOps.size(); 3771 for (unsigned i = 0; i < NumMemOps; i++) { 3772 EVT VT = MemOps[i]; 3773 unsigned VTSize = VT.getSizeInBits() / 8; 3774 SDValue Value, Store; 3775 3776 Value = DAG.getLoad(VT, dl, Chain, 3777 getMemBasePlusOffset(Src, SrcOff, dl, DAG), 3778 SrcPtrInfo.getWithOffset(SrcOff), isVol, 3779 false, false, SrcAlign); 3780 LoadValues.push_back(Value); 3781 LoadChains.push_back(Value.getValue(1)); 3782 SrcOff += VTSize; 3783 } 3784 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3785 &LoadChains[0], LoadChains.size()); 3786 OutChains.clear(); 3787 for (unsigned i = 0; i < NumMemOps; i++) { 3788 EVT VT = MemOps[i]; 3789 unsigned VTSize = VT.getSizeInBits() / 8; 3790 SDValue Value, Store; 3791 3792 Store = DAG.getStore(Chain, dl, LoadValues[i], 3793 getMemBasePlusOffset(Dst, DstOff, dl, DAG), 3794 DstPtrInfo.getWithOffset(DstOff), isVol, false, Align); 3795 OutChains.push_back(Store); 3796 DstOff += VTSize; 3797 } 3798 3799 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3800 &OutChains[0], OutChains.size()); 3801 } 3802 3803 static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl, 3804 SDValue Chain, SDValue Dst, 3805 SDValue Src, uint64_t Size, 3806 unsigned Align, bool isVol, 3807 MachinePointerInfo DstPtrInfo) { 3808 // Turn a memset of undef to nop. 3809 if (Src.getOpcode() == ISD::UNDEF) 3810 return Chain; 3811 3812 // Expand memset to a series of load/store ops if the size operand 3813 // falls below a certain threshold. 3814 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3815 std::vector<EVT> MemOps; 3816 bool DstAlignCanChange = false; 3817 MachineFunction &MF = DAG.getMachineFunction(); 3818 MachineFrameInfo *MFI = MF.getFrameInfo(); 3819 bool OptSize = MF.getFunction()->getAttributes(). 3820 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); 3821 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); 3822 if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) 3823 DstAlignCanChange = true; 3824 bool IsZeroVal = 3825 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue(); 3826 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize), 3827 Size, (DstAlignCanChange ? 0 : Align), 0, 3828 true, IsZeroVal, false, true, DAG, TLI)) 3829 return SDValue(); 3830 3831 if (DstAlignCanChange) { 3832 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); 3833 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty); 3834 if (NewAlign > Align) { 3835 // Give the stack frame object a larger alignment if needed. 3836 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) 3837 MFI->setObjectAlignment(FI->getIndex(), NewAlign); 3838 Align = NewAlign; 3839 } 3840 } 3841 3842 SmallVector<SDValue, 8> OutChains; 3843 uint64_t DstOff = 0; 3844 unsigned NumMemOps = MemOps.size(); 3845 3846 // Find the largest store and generate the bit pattern for it. 
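// For example, memset(p, 0xAB, 12) with MemOps = {i64, i32} picks LargestVT
// = i64 and MemSetValue = 0xABABABABABABABAB; the trailing i32 store can
// then reuse it as a (free) truncate instead of recomputing the splat.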
3847 EVT LargestVT = MemOps[0]; 3848 for (unsigned i = 1; i < NumMemOps; i++) 3849 if (MemOps[i].bitsGT(LargestVT)) 3850 LargestVT = MemOps[i]; 3851 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl); 3852 3853 for (unsigned i = 0; i < NumMemOps; i++) { 3854 EVT VT = MemOps[i]; 3855 unsigned VTSize = VT.getSizeInBits() / 8; 3856 if (VTSize > Size) { 3857 // Issuing an unaligned load / store pair that overlaps with the previous 3858 // pair. Adjust the offset accordingly. 3859 assert(i == NumMemOps-1 && i != 0); 3860 DstOff -= VTSize - Size; 3861 } 3862 3863 // If this store is smaller than the largest store see whether we can get 3864 // the smaller value for free with a truncate. 3865 SDValue Value = MemSetValue; 3866 if (VT.bitsLT(LargestVT)) { 3867 if (!LargestVT.isVector() && !VT.isVector() && 3868 TLI.isTruncateFree(LargestVT, VT)) 3869 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue); 3870 else 3871 Value = getMemsetValue(Src, VT, DAG, dl); 3872 } 3873 assert(Value.getValueType() == VT && "Value with wrong type."); 3874 SDValue Store = DAG.getStore(Chain, dl, Value, 3875 getMemBasePlusOffset(Dst, DstOff, dl, DAG), 3876 DstPtrInfo.getWithOffset(DstOff), 3877 isVol, false, Align); 3878 OutChains.push_back(Store); 3879 DstOff += VT.getSizeInBits() / 8; 3880 Size -= VTSize; 3881 } 3882 3883 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 3884 &OutChains[0], OutChains.size()); 3885 } 3886 3887 SDValue SelectionDAG::getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst, 3888 SDValue Src, SDValue Size, 3889 unsigned Align, bool isVol, bool AlwaysInline, 3890 MachinePointerInfo DstPtrInfo, 3891 MachinePointerInfo SrcPtrInfo) { 3892 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 3893 3894 // Check to see if we should lower the memcpy to loads and stores first. 3895 // For cases within the target-specified limits, this is the best choice. 3896 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 3897 if (ConstantSize) { 3898 // Memcpy with size zero? Just return the original chain. 3899 if (ConstantSize->isNullValue()) 3900 return Chain; 3901 3902 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 3903 ConstantSize->getZExtValue(),Align, 3904 isVol, false, DstPtrInfo, SrcPtrInfo); 3905 if (Result.getNode()) 3906 return Result; 3907 } 3908 3909 // Then check to see if we should lower the memcpy with target-specific 3910 // code. If the target chooses to do this, this is the next best. 3911 SDValue Result = 3912 TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align, 3913 isVol, AlwaysInline, 3914 DstPtrInfo, SrcPtrInfo); 3915 if (Result.getNode()) 3916 return Result; 3917 3918 // If we really need inline code and the target declined to provide it, 3919 // use a (potentially long) sequence of loads and stores. 3920 if (AlwaysInline) { 3921 assert(ConstantSize && "AlwaysInline requires a constant size!"); 3922 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src, 3923 ConstantSize->getZExtValue(), Align, isVol, 3924 true, DstPtrInfo, SrcPtrInfo); 3925 } 3926 3927 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc 3928 // memcpy is not guaranteed to be safe. libc memcpys aren't required to 3929 // respect volatile, so they may do things like read or write memory 3930 // beyond the given memory regions. But fixing this isn't easy, and most 3931 // people don't care. 3932 3933 const TargetLowering *TLI = TM.getTargetLowering(); 3934 3935 // Emit a library call. 
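// That is, fall back to calling the libc memcpy, passing Dst, Src and Size
// as pointer-sized integer arguments, in that order.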
3936 TargetLowering::ArgListTy Args; 3937 TargetLowering::ArgListEntry Entry; 3938 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext()); 3939 Entry.Node = Dst; Args.push_back(Entry); 3940 Entry.Node = Src; Args.push_back(Entry); 3941 Entry.Node = Size; Args.push_back(Entry); 3942 // FIXME: pass in SDLoc 3943 TargetLowering:: 3944 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()), 3945 false, false, false, false, 0, 3946 TLI->getLibcallCallingConv(RTLIB::MEMCPY), 3947 /*isTailCall=*/false, 3948 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false, 3949 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY), 3950 TLI->getPointerTy()), 3951 Args, *this, dl); 3952 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 3953 3954 return CallResult.second; 3955 } 3956 3957 SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst, 3958 SDValue Src, SDValue Size, 3959 unsigned Align, bool isVol, 3960 MachinePointerInfo DstPtrInfo, 3961 MachinePointerInfo SrcPtrInfo) { 3962 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 3963 3964 // Check to see if we should lower the memmove to loads and stores first. 3965 // For cases within the target-specified limits, this is the best choice. 3966 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); 3967 if (ConstantSize) { 3968 // Memmove with size zero? Just return the original chain. 3969 if (ConstantSize->isNullValue()) 3970 return Chain; 3971 3972 SDValue Result = 3973 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src, 3974 ConstantSize->getZExtValue(), Align, isVol, 3975 false, DstPtrInfo, SrcPtrInfo); 3976 if (Result.getNode()) 3977 return Result; 3978 } 3979 3980 // Then check to see if we should lower the memmove with target-specific 3981 // code. If the target chooses to do this, this is the next best. 3982 SDValue Result = 3983 TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol, 3984 DstPtrInfo, SrcPtrInfo); 3985 if (Result.getNode()) 3986 return Result; 3987 3988 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may 3989 // not be safe. See memcpy above for more details. 3990 3991 const TargetLowering *TLI = TM.getTargetLowering(); 3992 3993 // Emit a library call. 3994 TargetLowering::ArgListTy Args; 3995 TargetLowering::ArgListEntry Entry; 3996 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext()); 3997 Entry.Node = Dst; Args.push_back(Entry); 3998 Entry.Node = Src; Args.push_back(Entry); 3999 Entry.Node = Size; Args.push_back(Entry); 4000 // FIXME: pass in SDLoc 4001 TargetLowering:: 4002 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()), 4003 false, false, false, false, 0, 4004 TLI->getLibcallCallingConv(RTLIB::MEMMOVE), 4005 /*isTailCall=*/false, 4006 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false, 4007 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE), 4008 TLI->getPointerTy()), 4009 Args, *this, dl); 4010 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI); 4011 4012 return CallResult.second; 4013 } 4014 4015 SDValue SelectionDAG::getMemset(SDValue Chain, SDLoc dl, SDValue Dst, 4016 SDValue Src, SDValue Size, 4017 unsigned Align, bool isVol, 4018 MachinePointerInfo DstPtrInfo) { 4019 assert(Align && "The SDAG layer expects explicit alignment and reserves 0"); 4020 4021 // Check to see if we should lower the memset to stores first. 4022 // For cases within the target-specified limits, this is the best choice. 
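// For example, memset(p, 0, 16) on a typical 64-bit target becomes two
// aligned i64 stores of zero here rather than a library call.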
4023   ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4024   if (ConstantSize) {
4025     // Memset with size zero? Just return the original chain.
4026     if (ConstantSize->isNullValue())
4027       return Chain;
4028
4029     SDValue Result =
4030       getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
4031                       Align, isVol, DstPtrInfo);
4032
4033     if (Result.getNode())
4034       return Result;
4035   }
4036
4037   // Then check to see if we should lower the memset with target-specific
4038   // code. If the target chooses to do this, this is the next best.
4039   SDValue Result =
4040     TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4041                                 DstPtrInfo);
4042   if (Result.getNode())
4043     return Result;
4044
4045   // Emit a library call.
4046   const TargetLowering *TLI = TM.getTargetLowering();
4047   Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(*getContext());
4048   TargetLowering::ArgListTy Args;
4049   TargetLowering::ArgListEntry Entry;
4050   Entry.Node = Dst; Entry.Ty = IntPtrTy;
4051   Args.push_back(Entry);
4052   // Extend or truncate the argument to be an i32 value for the call.
4053   if (Src.getValueType().bitsGT(MVT::i32))
4054     Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
4055   else
4056     Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
4057   Entry.Node = Src;
4058   Entry.Ty = Type::getInt32Ty(*getContext());
4059   Entry.isSExt = true;
4060   Args.push_back(Entry);
4061   Entry.Node = Size;
4062   Entry.Ty = IntPtrTy;
4063   Entry.isSExt = false;
4064   Args.push_back(Entry);
4065   // FIXME: pass in SDLoc
4066   TargetLowering::
4067   CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4068                        false, false, false, false, 0,
4069                        TLI->getLibcallCallingConv(RTLIB::MEMSET),
4070                        /*isTailCall=*/false,
4071                        /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
4072                        getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
4073                                          TLI->getPointerTy()),
4074                        Args, *this, dl);
4075   std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
4076
4077   return CallResult.second;
4078 }
4079
4080 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT,
4081                                 SDValue Chain, SDValue Ptr, SDValue Cmp,
4082                                 SDValue Swp, MachinePointerInfo PtrInfo,
4083                                 unsigned Alignment,
4084                                 AtomicOrdering Ordering,
4085                                 SynchronizationScope SynchScope) {
4086   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4087     Alignment = getEVTAlignment(MemVT);
4088
4089   MachineFunction &MF = getMachineFunction();
4090
4091   // All atomics are load and store, except for ATOMIC_LOAD and ATOMIC_STORE.
4092   // For now, atomics are considered to be volatile always.
4093   // FIXME: Volatile isn't really correct; we should keep track of atomic
4094   // orderings in the memoperand.
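// For example, ISD::ATOMIC_LOAD gets MOVolatile|MOLoad, ISD::ATOMIC_STORE
// gets MOVolatile|MOStore, and a read-modify-write operation such as
// ISD::ATOMIC_LOAD_ADD gets all three flags.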
4095 unsigned Flags = MachineMemOperand::MOVolatile; 4096 if (Opcode != ISD::ATOMIC_STORE) 4097 Flags |= MachineMemOperand::MOLoad; 4098 if (Opcode != ISD::ATOMIC_LOAD) 4099 Flags |= MachineMemOperand::MOStore; 4100 4101 MachineMemOperand *MMO = 4102 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment); 4103 4104 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO, 4105 Ordering, SynchScope); 4106 } 4107 4108 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4109 SDValue Chain, 4110 SDValue Ptr, SDValue Cmp, 4111 SDValue Swp, MachineMemOperand *MMO, 4112 AtomicOrdering Ordering, 4113 SynchronizationScope SynchScope) { 4114 assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op"); 4115 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); 4116 4117 EVT VT = Cmp.getValueType(); 4118 4119 SDVTList VTs = getVTList(VT, MVT::Other); 4120 FoldingSetNodeID ID; 4121 ID.AddInteger(MemVT.getRawBits()); 4122 SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; 4123 AddNodeIDNode(ID, Opcode, VTs, Ops, 4); 4124 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 4125 void* IP = 0; 4126 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { 4127 cast<AtomicSDNode>(E)->refineAlignment(MMO); 4128 return SDValue(E, 0); 4129 } 4130 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, MemVT, Chain, 4131 Ptr, Cmp, Swp, MMO, Ordering, 4132 SynchScope); 4133 CSEMap.InsertNode(N, IP); 4134 AllNodes.push_back(N); 4135 return SDValue(N, 0); 4136 } 4137 4138 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4139 SDValue Chain, 4140 SDValue Ptr, SDValue Val, 4141 const Value* PtrVal, 4142 unsigned Alignment, 4143 AtomicOrdering Ordering, 4144 SynchronizationScope SynchScope) { 4145 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4146 Alignment = getEVTAlignment(MemVT); 4147 4148 MachineFunction &MF = getMachineFunction(); 4149 // An atomic store does not load. An atomic load does not store. 4150 // (An atomicrmw obviously both loads and stores.) 4151 // For now, atomics are considered to be volatile always, and they are 4152 // chained as such. 4153 // FIXME: Volatile isn't really correct; we should keep track of atomic 4154 // orderings in the memoperand. 
4155 unsigned Flags = MachineMemOperand::MOVolatile; 4156 if (Opcode != ISD::ATOMIC_STORE) 4157 Flags |= MachineMemOperand::MOLoad; 4158 if (Opcode != ISD::ATOMIC_LOAD) 4159 Flags |= MachineMemOperand::MOStore; 4160 4161 MachineMemOperand *MMO = 4162 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, 4163 MemVT.getStoreSize(), Alignment); 4164 4165 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO, 4166 Ordering, SynchScope); 4167 } 4168 4169 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4170 SDValue Chain, 4171 SDValue Ptr, SDValue Val, 4172 MachineMemOperand *MMO, 4173 AtomicOrdering Ordering, 4174 SynchronizationScope SynchScope) { 4175 assert((Opcode == ISD::ATOMIC_LOAD_ADD || 4176 Opcode == ISD::ATOMIC_LOAD_SUB || 4177 Opcode == ISD::ATOMIC_LOAD_AND || 4178 Opcode == ISD::ATOMIC_LOAD_OR || 4179 Opcode == ISD::ATOMIC_LOAD_XOR || 4180 Opcode == ISD::ATOMIC_LOAD_NAND || 4181 Opcode == ISD::ATOMIC_LOAD_MIN || 4182 Opcode == ISD::ATOMIC_LOAD_MAX || 4183 Opcode == ISD::ATOMIC_LOAD_UMIN || 4184 Opcode == ISD::ATOMIC_LOAD_UMAX || 4185 Opcode == ISD::ATOMIC_SWAP || 4186 Opcode == ISD::ATOMIC_STORE) && 4187 "Invalid Atomic Op"); 4188 4189 EVT VT = Val.getValueType(); 4190 4191 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) : 4192 getVTList(VT, MVT::Other); 4193 FoldingSetNodeID ID; 4194 ID.AddInteger(MemVT.getRawBits()); 4195 SDValue Ops[] = {Chain, Ptr, Val}; 4196 AddNodeIDNode(ID, Opcode, VTs, Ops, 3); 4197 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 4198 void* IP = 0; 4199 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { 4200 cast<AtomicSDNode>(E)->refineAlignment(MMO); 4201 return SDValue(E, 0); 4202 } 4203 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, MemVT, Chain, 4204 Ptr, Val, MMO, 4205 Ordering, SynchScope); 4206 CSEMap.InsertNode(N, IP); 4207 AllNodes.push_back(N); 4208 return SDValue(N, 0); 4209 } 4210 4211 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4212 EVT VT, SDValue Chain, 4213 SDValue Ptr, 4214 const Value* PtrVal, 4215 unsigned Alignment, 4216 AtomicOrdering Ordering, 4217 SynchronizationScope SynchScope) { 4218 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4219 Alignment = getEVTAlignment(MemVT); 4220 4221 MachineFunction &MF = getMachineFunction(); 4222 // An atomic store does not load. An atomic load does not store. 4223 // (An atomicrmw obviously both loads and stores.) 4224 // For now, atomics are considered to be volatile always, and they are 4225 // chained as such. 4226 // FIXME: Volatile isn't really correct; we should keep track of atomic 4227 // orderings in the memoperand. 
4228 unsigned Flags = MachineMemOperand::MOVolatile; 4229 if (Opcode != ISD::ATOMIC_STORE) 4230 Flags |= MachineMemOperand::MOLoad; 4231 if (Opcode != ISD::ATOMIC_LOAD) 4232 Flags |= MachineMemOperand::MOStore; 4233 4234 MachineMemOperand *MMO = 4235 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, 4236 MemVT.getStoreSize(), Alignment); 4237 4238 return getAtomic(Opcode, dl, MemVT, VT, Chain, Ptr, MMO, 4239 Ordering, SynchScope); 4240 } 4241 4242 SDValue SelectionDAG::getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, 4243 EVT VT, SDValue Chain, 4244 SDValue Ptr, 4245 MachineMemOperand *MMO, 4246 AtomicOrdering Ordering, 4247 SynchronizationScope SynchScope) { 4248 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op"); 4249 4250 SDVTList VTs = getVTList(VT, MVT::Other); 4251 FoldingSetNodeID ID; 4252 ID.AddInteger(MemVT.getRawBits()); 4253 SDValue Ops[] = {Chain, Ptr}; 4254 AddNodeIDNode(ID, Opcode, VTs, Ops, 2); 4255 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 4256 void* IP = 0; 4257 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { 4258 cast<AtomicSDNode>(E)->refineAlignment(MMO); 4259 return SDValue(E, 0); 4260 } 4261 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, MemVT, Chain, 4262 Ptr, MMO, Ordering, SynchScope); 4263 CSEMap.InsertNode(N, IP); 4264 AllNodes.push_back(N); 4265 return SDValue(N, 0); 4266 } 4267 4268 /// getMergeValues - Create a MERGE_VALUES node from the given operands. 4269 SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps, 4270 SDLoc dl) { 4271 if (NumOps == 1) 4272 return Ops[0]; 4273 4274 SmallVector<EVT, 4> VTs; 4275 VTs.reserve(NumOps); 4276 for (unsigned i = 0; i < NumOps; ++i) 4277 VTs.push_back(Ops[i].getValueType()); 4278 return getNode(ISD::MERGE_VALUES, dl, getVTList(&VTs[0], NumOps), 4279 Ops, NumOps); 4280 } 4281 4282 SDValue 4283 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, 4284 const EVT *VTs, unsigned NumVTs, 4285 const SDValue *Ops, unsigned NumOps, 4286 EVT MemVT, MachinePointerInfo PtrInfo, 4287 unsigned Align, bool Vol, 4288 bool ReadMem, bool WriteMem) { 4289 return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps, 4290 MemVT, PtrInfo, Align, Vol, 4291 ReadMem, WriteMem); 4292 } 4293 4294 SDValue 4295 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList, 4296 const SDValue *Ops, unsigned NumOps, 4297 EVT MemVT, MachinePointerInfo PtrInfo, 4298 unsigned Align, bool Vol, 4299 bool ReadMem, bool WriteMem) { 4300 if (Align == 0) // Ensure that codegen never sees alignment 0 4301 Align = getEVTAlignment(MemVT); 4302 4303 MachineFunction &MF = getMachineFunction(); 4304 unsigned Flags = 0; 4305 if (WriteMem) 4306 Flags |= MachineMemOperand::MOStore; 4307 if (ReadMem) 4308 Flags |= MachineMemOperand::MOLoad; 4309 if (Vol) 4310 Flags |= MachineMemOperand::MOVolatile; 4311 MachineMemOperand *MMO = 4312 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align); 4313 4314 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO); 4315 } 4316 4317 SDValue 4318 SelectionDAG::getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList, 4319 const SDValue *Ops, unsigned NumOps, 4320 EVT MemVT, MachineMemOperand *MMO) { 4321 assert((Opcode == ISD::INTRINSIC_VOID || 4322 Opcode == ISD::INTRINSIC_W_CHAIN || 4323 Opcode == ISD::PREFETCH || 4324 Opcode == ISD::LIFETIME_START || 4325 Opcode == ISD::LIFETIME_END || 4326 (Opcode <= INT_MAX && 4327 (int)Opcode >= 
ISD::FIRST_TARGET_MEMORY_OPCODE)) && 4328 "Opcode is not a memory-accessing opcode!"); 4329 4330 // Memoize the node unless it returns a flag. 4331 MemIntrinsicSDNode *N; 4332 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 4333 FoldingSetNodeID ID; 4334 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps); 4335 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 4336 void *IP = 0; 4337 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { 4338 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); 4339 return SDValue(E, 0); 4340 } 4341 4342 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(), dl.getDebugLoc(), VTList, Ops, NumOps, 4343 MemVT, MMO); 4344 CSEMap.InsertNode(N, IP); 4345 } else { 4346 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl.getIROrder(), dl.getDebugLoc(), VTList, Ops, NumOps, 4347 MemVT, MMO); 4348 } 4349 AllNodes.push_back(N); 4350 return SDValue(N, 0); 4351 } 4352 4353 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 4354 /// MachinePointerInfo record from it. This is particularly useful because the 4355 /// code generator has many cases where it doesn't bother passing in a 4356 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 4357 static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) { 4358 // If this is FI+Offset, we can model it. 4359 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) 4360 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset); 4361 4362 // If this is (FI+Offset1)+Offset2, we can model it. 4363 if (Ptr.getOpcode() != ISD::ADD || 4364 !isa<ConstantSDNode>(Ptr.getOperand(1)) || 4365 !isa<FrameIndexSDNode>(Ptr.getOperand(0))) 4366 return MachinePointerInfo(); 4367 4368 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); 4369 return MachinePointerInfo::getFixedStack(FI, Offset+ 4370 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); 4371 } 4372 4373 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a 4374 /// MachinePointerInfo record from it. This is particularly useful because the 4375 /// code generator has many cases where it doesn't bother passing in a 4376 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst". 4377 static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) { 4378 // If the 'Offset' value isn't a constant, we can't handle this. 
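// For example, a load from (add FrameIndex#2, Constant<8>) with no explicit
// MachinePointerInfo is modeled as MachinePointerInfo::getFixedStack(2, 8).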
4379 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp)) 4380 return InferPointerInfo(Ptr, OffsetNode->getSExtValue()); 4381 if (OffsetOp.getOpcode() == ISD::UNDEF) 4382 return InferPointerInfo(Ptr); 4383 return MachinePointerInfo(); 4384 } 4385 4386 4387 SDValue 4388 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 4389 EVT VT, SDLoc dl, SDValue Chain, 4390 SDValue Ptr, SDValue Offset, 4391 MachinePointerInfo PtrInfo, EVT MemVT, 4392 bool isVolatile, bool isNonTemporal, bool isInvariant, 4393 unsigned Alignment, const MDNode *TBAAInfo, 4394 const MDNode *Ranges) { 4395 assert(Chain.getValueType() == MVT::Other && 4396 "Invalid chain type"); 4397 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4398 Alignment = getEVTAlignment(VT); 4399 4400 unsigned Flags = MachineMemOperand::MOLoad; 4401 if (isVolatile) 4402 Flags |= MachineMemOperand::MOVolatile; 4403 if (isNonTemporal) 4404 Flags |= MachineMemOperand::MONonTemporal; 4405 if (isInvariant) 4406 Flags |= MachineMemOperand::MOInvariant; 4407 4408 // If we don't have a PtrInfo, infer the trivial frame index case to simplify 4409 // clients. 4410 if (PtrInfo.V == 0) 4411 PtrInfo = InferPointerInfo(Ptr, Offset); 4412 4413 MachineFunction &MF = getMachineFunction(); 4414 MachineMemOperand *MMO = 4415 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment, 4416 TBAAInfo, Ranges); 4417 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO); 4418 } 4419 4420 SDValue 4421 SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, 4422 EVT VT, SDLoc dl, SDValue Chain, 4423 SDValue Ptr, SDValue Offset, EVT MemVT, 4424 MachineMemOperand *MMO) { 4425 if (VT == MemVT) { 4426 ExtType = ISD::NON_EXTLOAD; 4427 } else if (ExtType == ISD::NON_EXTLOAD) { 4428 assert(VT == MemVT && "Non-extending load from different memory type!"); 4429 } else { 4430 // Extending load. 4431 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && 4432 "Should only be an extending load, not truncating!"); 4433 assert(VT.isInteger() == MemVT.isInteger() && 4434 "Cannot convert from FP to Int or Int -> FP!"); 4435 assert(VT.isVector() == MemVT.isVector() && 4436 "Cannot use trunc store to convert to or from a vector!"); 4437 assert((!VT.isVector() || 4438 VT.getVectorNumElements() == MemVT.getVectorNumElements()) && 4439 "Cannot use trunc store to change the number of vector elements!"); 4440 } 4441 4442 bool Indexed = AM != ISD::UNINDEXED; 4443 assert((Indexed || Offset.getOpcode() == ISD::UNDEF) && 4444 "Unindexed load with an offset!"); 4445 4446 SDVTList VTs = Indexed ? 
4447     getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
4448   SDValue Ops[] = { Chain, Ptr, Offset };
4449   FoldingSetNodeID ID;
4450   AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
4451   ID.AddInteger(MemVT.getRawBits());
4452   ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
4453                                      MMO->isNonTemporal(),
4454                                      MMO->isInvariant()));
4455   ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4456   void *IP = 0;
4457   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4458     cast<LoadSDNode>(E)->refineAlignment(MMO);
4459     return SDValue(E, 0);
4460   }
4461   SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl.getIROrder(), dl.getDebugLoc(), VTs, AM, ExtType,
4462                                              MemVT, MMO);
4463   CSEMap.InsertNode(N, IP);
4464   AllNodes.push_back(N);
4465   return SDValue(N, 0);
4466 }
4467
4468 SDValue SelectionDAG::getLoad(EVT VT, SDLoc dl,
4469                               SDValue Chain, SDValue Ptr,
4470                               MachinePointerInfo PtrInfo,
4471                               bool isVolatile, bool isNonTemporal,
4472                               bool isInvariant, unsigned Alignment,
4473                               const MDNode *TBAAInfo,
4474                               const MDNode *Ranges) {
4475   SDValue Undef = getUNDEF(Ptr.getValueType());
4476   return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4477                  PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
4478                  TBAAInfo, Ranges);
4479 }
4480
4481 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
4482                                  SDValue Chain, SDValue Ptr,
4483                                  MachinePointerInfo PtrInfo, EVT MemVT,
4484                                  bool isVolatile, bool isNonTemporal,
4485                                  unsigned Alignment, const MDNode *TBAAInfo) {
4486   SDValue Undef = getUNDEF(Ptr.getValueType());
4487   return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4488                  PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
4489                  TBAAInfo);
4490 }
4491
4492
4493 SDValue
4494 SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
4495                              SDValue Offset, ISD::MemIndexedMode AM) {
4496   LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
4497   assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
4498          "Load is already an indexed load!");
4499   return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
4500                  LD->getChain(), Base, Offset, LD->getPointerInfo(),
4501                  LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
4502                  false, LD->getAlignment());
4503 }
4504
4505 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4506                                SDValue Ptr, MachinePointerInfo PtrInfo,
4507                                bool isVolatile, bool isNonTemporal,
4508                                unsigned Alignment, const MDNode *TBAAInfo) {
4509   assert(Chain.getValueType() == MVT::Other &&
4510          "Invalid chain type");
4511   if (Alignment == 0)  // Ensure that codegen never sees alignment 0
4512     Alignment = getEVTAlignment(Val.getValueType());
4513
4514   unsigned Flags = MachineMemOperand::MOStore;
4515   if (isVolatile)
4516     Flags |= MachineMemOperand::MOVolatile;
4517   if (isNonTemporal)
4518     Flags |= MachineMemOperand::MONonTemporal;
4519
4520   if (PtrInfo.V == 0)
4521     PtrInfo = InferPointerInfo(Ptr);
4522
4523   MachineFunction &MF = getMachineFunction();
4524   MachineMemOperand *MMO =
4525     MF.getMachineMemOperand(PtrInfo, Flags,
4526                             Val.getValueType().getStoreSize(), Alignment,
4527                             TBAAInfo);
4528
4529   return getStore(Chain, dl, Val, Ptr, MMO);
4530 }
4531
4532 SDValue SelectionDAG::getStore(SDValue Chain, SDLoc dl, SDValue Val,
4533                                SDValue Ptr, MachineMemOperand *MMO) {
4534   assert(Chain.getValueType() == MVT::Other &&
4535          "Invalid chain type");
4536   EVT VT = Val.getValueType();
4537   SDVTList VTs = getVTList(MVT::Other);
4538   SDValue Undef =
getUNDEF(Ptr.getValueType()); 4539 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 4540 FoldingSetNodeID ID; 4541 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4); 4542 ID.AddInteger(VT.getRawBits()); 4543 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(), 4544 MMO->isNonTemporal(), MMO->isInvariant())); 4545 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 4546 void *IP = 0; 4547 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { 4548 cast<StoreSDNode>(E)->refineAlignment(MMO); 4549 return SDValue(E, 0); 4550 } 4551 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(), dl.getDebugLoc(), VTs, ISD::UNINDEXED, 4552 false, VT, MMO); 4553 CSEMap.InsertNode(N, IP); 4554 AllNodes.push_back(N); 4555 return SDValue(N, 0); 4556 } 4557 4558 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, 4559 SDValue Ptr, MachinePointerInfo PtrInfo, 4560 EVT SVT,bool isVolatile, bool isNonTemporal, 4561 unsigned Alignment, 4562 const MDNode *TBAAInfo) { 4563 assert(Chain.getValueType() == MVT::Other && 4564 "Invalid chain type"); 4565 if (Alignment == 0) // Ensure that codegen never sees alignment 0 4566 Alignment = getEVTAlignment(SVT); 4567 4568 unsigned Flags = MachineMemOperand::MOStore; 4569 if (isVolatile) 4570 Flags |= MachineMemOperand::MOVolatile; 4571 if (isNonTemporal) 4572 Flags |= MachineMemOperand::MONonTemporal; 4573 4574 if (PtrInfo.V == 0) 4575 PtrInfo = InferPointerInfo(Ptr); 4576 4577 MachineFunction &MF = getMachineFunction(); 4578 MachineMemOperand *MMO = 4579 MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment, 4580 TBAAInfo); 4581 4582 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO); 4583 } 4584 4585 SDValue SelectionDAG::getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, 4586 SDValue Ptr, EVT SVT, 4587 MachineMemOperand *MMO) { 4588 EVT VT = Val.getValueType(); 4589 4590 assert(Chain.getValueType() == MVT::Other && 4591 "Invalid chain type"); 4592 if (VT == SVT) 4593 return getStore(Chain, dl, Val, Ptr, MMO); 4594 4595 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && 4596 "Should only be a truncating store, not extending!"); 4597 assert(VT.isInteger() == SVT.isInteger() && 4598 "Can't do FP-INT conversion!"); 4599 assert(VT.isVector() == SVT.isVector() && 4600 "Cannot use trunc store to convert to or from a vector!"); 4601 assert((!VT.isVector() || 4602 VT.getVectorNumElements() == SVT.getVectorNumElements()) && 4603 "Cannot use trunc store to change the number of vector elements!"); 4604 4605 SDVTList VTs = getVTList(MVT::Other); 4606 SDValue Undef = getUNDEF(Ptr.getValueType()); 4607 SDValue Ops[] = { Chain, Val, Ptr, Undef }; 4608 FoldingSetNodeID ID; 4609 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4); 4610 ID.AddInteger(SVT.getRawBits()); 4611 ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(), 4612 MMO->isNonTemporal(), MMO->isInvariant())); 4613 ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); 4614 void *IP = 0; 4615 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { 4616 cast<StoreSDNode>(E)->refineAlignment(MMO); 4617 return SDValue(E, 0); 4618 } 4619 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(), dl.getDebugLoc(), VTs, ISD::UNINDEXED, 4620 true, SVT, MMO); 4621 CSEMap.InsertNode(N, IP); 4622 AllNodes.push_back(N); 4623 return SDValue(N, 0); 4624 } 4625 4626 SDValue 4627 SelectionDAG::getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base, 4628 SDValue Offset, ISD::MemIndexedMode AM) { 4629 StoreSDNode *ST = cast<StoreSDNode>(OrigStore); 4630 
assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
4631          "Store is already an indexed store!");
4632   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
4633   SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
4634   FoldingSetNodeID ID;
4635   AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4636   ID.AddInteger(ST->getMemoryVT().getRawBits());
4637   ID.AddInteger(ST->getRawSubclassData());
4638   ID.AddInteger(ST->getPointerInfo().getAddrSpace());
4639   void *IP = 0;
4640   if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4641     return SDValue(E, 0);
4642
4643   SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
4644                                               ST->isTruncatingStore(),
4645                                               ST->getMemoryVT(),
4646                                               ST->getMemOperand());
4647   CSEMap.InsertNode(N, IP);
4648   AllNodes.push_back(N);
4649   return SDValue(N, 0);
4650 }
4651
4652 SDValue SelectionDAG::getVAArg(EVT VT, SDLoc dl,
4653                                SDValue Chain, SDValue Ptr,
4654                                SDValue SV,
4655                                unsigned Align) {
4656   SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
4657   return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
4658 }
4659
4660 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4661                               const SDUse *Ops, unsigned NumOps) {
4662   switch (NumOps) {
4663   case 0: return getNode(Opcode, DL, VT);
4664   case 1: return getNode(Opcode, DL, VT, Ops[0]);
4665   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4666   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4667   default: break;
4668   }
4669
4670   // Copy from an SDUse array into an SDValue array for use with
4671   // the regular getNode logic.
4672   SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
4673   return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
4674 }
4675
4676 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, EVT VT,
4677                               const SDValue *Ops, unsigned NumOps) {
4678   switch (NumOps) {
4679   case 0: return getNode(Opcode, DL, VT);
4680   case 1: return getNode(Opcode, DL, VT, Ops[0]);
4681   case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4682   case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4683   default: break;
4684   }
4685
4686   switch (Opcode) {
4687   default: break;
4688   case ISD::SELECT_CC: {
4689     assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
4690     assert(Ops[0].getValueType() == Ops[1].getValueType() &&
4691            "LHS and RHS of condition must have same type!");
4692     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4693            "True and False arms of SelectCC must have same type!");
4694     assert(Ops[2].getValueType() == VT &&
4695            "select_cc node must be of same type as true and false value!");
4696     break;
4697   }
4698   case ISD::BR_CC: {
4699     assert(NumOps == 5 && "BR_CC takes 5 operands!");
4700     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4701            "LHS/RHS of comparison should match types!");
4702     break;
4703   }
4704   }
4705
4706   // Memoize nodes.
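// For reference, the operand layouts checked above are:
//   select_cc: (LHS, RHS, TrueVal, FalseVal, CondCode)
//   br_cc:     (Chain, CondCode, LHS, RHS, DestBasicBlock)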
4707   SDNode *N;
4708   SDVTList VTs = getVTList(VT);
4709
4710   if (VT != MVT::Glue) {
4711     FoldingSetNodeID ID;
4712     AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
4713     void *IP = 0;
4714
4715     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4716       return SDValue(E, 0);
4717
4718     N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, Ops, NumOps);
4719     CSEMap.InsertNode(N, IP);
4720   } else {
4721     N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs, Ops, NumOps);
4722   }
4723
4724   AllNodes.push_back(N);
4725 #ifndef NDEBUG
4726   VerifySDNode(N);
4727 #endif
4728   return SDValue(N, 0);
4729 }
4730
4731 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
4732                               ArrayRef<EVT> ResultTys,
4733                               const SDValue *Ops, unsigned NumOps) {
4734   return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()),
4735                  Ops, NumOps);
4736 }
4737
4738 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL,
4739                               const EVT *VTs, unsigned NumVTs,
4740                               const SDValue *Ops, unsigned NumOps) {
4741   if (NumVTs == 1)
4742     return getNode(Opcode, DL, VTs[0], Ops, NumOps);
4743   return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps);
4744 }
4745
4746 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList,
4747                               const SDValue *Ops, unsigned NumOps) {
4748   if (VTList.NumVTs == 1)
4749     return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);
4750
4751 #if 0
4752   switch (Opcode) {
4753   // FIXME: figure out how to safely handle things like
4754   // int foo(int x) { return 1 << (x & 255); }
4755   // int bar() { return foo(256); }
4756   case ISD::SRA_PARTS:
4757   case ISD::SRL_PARTS:
4758   case ISD::SHL_PARTS:
4759     if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
4760         cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
4761       return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4762     else if (N3.getOpcode() == ISD::AND)
4763       if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
4764         // If the and is only masking out bits that cannot affect the shift,
4765         // eliminate the and.
4766         unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
4767         if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
4768           return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4769       }
4770     break;
4771   }
4772 #endif
4773
4774   // Memoize the node unless it returns a glue result.
4775 SDNode *N; 4776 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) { 4777 FoldingSetNodeID ID; 4778 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps); 4779 void *IP = 0; 4780 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) 4781 return SDValue(E, 0); 4782 4783 if (NumOps == 1) { 4784 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops[0]); 4785 } else if (NumOps == 2) { 4786 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops[0], Ops[1]); 4787 } else if (NumOps == 3) { 4788 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops[0], Ops[1], 4789 Ops[2]); 4790 } else { 4791 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops, NumOps); 4792 } 4793 CSEMap.InsertNode(N, IP); 4794 } else { 4795 if (NumOps == 1) { 4796 N = new (NodeAllocator) UnarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops[0]); 4797 } else if (NumOps == 2) { 4798 N = new (NodeAllocator) BinarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops[0], Ops[1]); 4799 } else if (NumOps == 3) { 4800 N = new (NodeAllocator) TernarySDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops[0], Ops[1], 4801 Ops[2]); 4802 } else { 4803 N = new (NodeAllocator) SDNode(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList, Ops, NumOps); 4804 } 4805 } 4806 AllNodes.push_back(N); 4807 #ifndef NDEBUG 4808 VerifySDNode(N); 4809 #endif 4810 return SDValue(N, 0); 4811 } 4812 4813 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList) { 4814 return getNode(Opcode, DL, VTList, 0, 0); 4815 } 4816 4817 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 4818 SDValue N1) { 4819 SDValue Ops[] = { N1 }; 4820 return getNode(Opcode, DL, VTList, Ops, 1); 4821 } 4822 4823 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 4824 SDValue N1, SDValue N2) { 4825 SDValue Ops[] = { N1, N2 }; 4826 return getNode(Opcode, DL, VTList, Ops, 2); 4827 } 4828 4829 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 4830 SDValue N1, SDValue N2, SDValue N3) { 4831 SDValue Ops[] = { N1, N2, N3 }; 4832 return getNode(Opcode, DL, VTList, Ops, 3); 4833 } 4834 4835 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 4836 SDValue N1, SDValue N2, SDValue N3, 4837 SDValue N4) { 4838 SDValue Ops[] = { N1, N2, N3, N4 }; 4839 return getNode(Opcode, DL, VTList, Ops, 4); 4840 } 4841 4842 SDValue SelectionDAG::getNode(unsigned Opcode, SDLoc DL, SDVTList VTList, 4843 SDValue N1, SDValue N2, SDValue N3, 4844 SDValue N4, SDValue N5) { 4845 SDValue Ops[] = { N1, N2, N3, N4, N5 }; 4846 return getNode(Opcode, DL, VTList, Ops, 5); 4847 } 4848 4849 SDVTList SelectionDAG::getVTList(EVT VT) { 4850 return makeVTList(SDNode::getValueTypeList(VT), 1); 4851 } 4852 4853 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) { 4854 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(), 4855 E = VTList.rend(); I != E; ++I) 4856 if (I->NumVTs == 2 && I->VTs[0] == VT1 && I->VTs[1] == VT2) 4857 return *I; 4858 4859 EVT *Array = Allocator.Allocate<EVT>(2); 4860 Array[0] = VT1; 4861 Array[1] = VT2; 4862 SDVTList Result = makeVTList(Array, 2); 4863 VTList.push_back(Result); 4864 return Result; 4865 } 4866 4867 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) { 4868 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(), 4869 E = VTList.rend(); I != E; ++I) 4870 if (I->NumVTs == 3 && I->VTs[0] == VT1 && 
I->VTs[1] == VT2 && 4871 I->VTs[2] == VT3) 4872 return *I; 4873 4874 EVT *Array = Allocator.Allocate<EVT>(3); 4875 Array[0] = VT1; 4876 Array[1] = VT2; 4877 Array[2] = VT3; 4878 SDVTList Result = makeVTList(Array, 3); 4879 VTList.push_back(Result); 4880 return Result; 4881 } 4882 4883 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) { 4884 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(), 4885 E = VTList.rend(); I != E; ++I) 4886 if (I->NumVTs == 4 && I->VTs[0] == VT1 && I->VTs[1] == VT2 && 4887 I->VTs[2] == VT3 && I->VTs[3] == VT4) 4888 return *I; 4889 4890 EVT *Array = Allocator.Allocate<EVT>(4); 4891 Array[0] = VT1; 4892 Array[1] = VT2; 4893 Array[2] = VT3; 4894 Array[3] = VT4; 4895 SDVTList Result = makeVTList(Array, 4); 4896 VTList.push_back(Result); 4897 return Result; 4898 } 4899 4900 SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) { 4901 switch (NumVTs) { 4902 case 0: llvm_unreachable("Cannot have nodes without results!"); 4903 case 1: return getVTList(VTs[0]); 4904 case 2: return getVTList(VTs[0], VTs[1]); 4905 case 3: return getVTList(VTs[0], VTs[1], VTs[2]); 4906 case 4: return getVTList(VTs[0], VTs[1], VTs[2], VTs[3]); 4907 default: break; 4908 } 4909 4910 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(), 4911 E = VTList.rend(); I != E; ++I) { 4912 if (I->NumVTs != NumVTs || VTs[0] != I->VTs[0] || VTs[1] != I->VTs[1]) 4913 continue; 4914 4915 if (std::equal(&VTs[2], &VTs[NumVTs], &I->VTs[2])) 4916 return *I; 4917 } 4918 4919 EVT *Array = Allocator.Allocate<EVT>(NumVTs); 4920 std::copy(VTs, VTs+NumVTs, Array); 4921 SDVTList Result = makeVTList(Array, NumVTs); 4922 VTList.push_back(Result); 4923 return Result; 4924 } 4925 4926 4927 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the 4928 /// specified operands. If the resultant node already exists in the DAG, 4929 /// this does not modify the specified node, instead it returns the node that 4930 /// already exists. If the resultant node does not exist in the DAG, the 4931 /// input node is returned. As a degenerate case, if you specify the same 4932 /// input operands as the node already has, the input node is returned. 4933 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) { 4934 assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); 4935 4936 // Check to see if there is no change. 4937 if (Op == N->getOperand(0)) return N; 4938 4939 // See if the modified node already exists. 4940 void *InsertPos = 0; 4941 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) 4942 return Existing; 4943 4944 // Nope it doesn't. Remove the node from its current place in the maps. 4945 if (InsertPos) 4946 if (!RemoveNodeFromCSEMaps(N)) 4947 InsertPos = 0; 4948 4949 // Now we update the operands. 4950 N->OperandList[0].set(Op); 4951 4952 // If this gets put into a CSE map, add it. 4953 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 4954 return N; 4955 } 4956 4957 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) { 4958 assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); 4959 4960 // Check to see if there is no change. 4961 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1)) 4962 return N; // No operands changed, just return the input node. 4963 4964 // See if the modified node already exists. 4965 void *InsertPos = 0; 4966 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) 4967 return Existing; 4968 4969 // Nope it doesn't. 
Remove the node from its current place in the maps. 4970 if (InsertPos) 4971 if (!RemoveNodeFromCSEMaps(N)) 4972 InsertPos = 0; 4973 4974 // Now we update the operands. 4975 if (N->OperandList[0] != Op1) 4976 N->OperandList[0].set(Op1); 4977 if (N->OperandList[1] != Op2) 4978 N->OperandList[1].set(Op2); 4979 4980 // If this gets put into a CSE map, add it. 4981 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 4982 return N; 4983 } 4984 4985 SDNode *SelectionDAG:: 4986 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) { 4987 SDValue Ops[] = { Op1, Op2, Op3 }; 4988 return UpdateNodeOperands(N, Ops, 3); 4989 } 4990 4991 SDNode *SelectionDAG:: 4992 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 4993 SDValue Op3, SDValue Op4) { 4994 SDValue Ops[] = { Op1, Op2, Op3, Op4 }; 4995 return UpdateNodeOperands(N, Ops, 4); 4996 } 4997 4998 SDNode *SelectionDAG:: 4999 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, 5000 SDValue Op3, SDValue Op4, SDValue Op5) { 5001 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; 5002 return UpdateNodeOperands(N, Ops, 5); 5003 } 5004 5005 SDNode *SelectionDAG:: 5006 UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) { 5007 assert(N->getNumOperands() == NumOps && 5008 "Update with wrong number of operands"); 5009 5010 // Check to see if there is no change. 5011 bool AnyChange = false; 5012 for (unsigned i = 0; i != NumOps; ++i) { 5013 if (Ops[i] != N->getOperand(i)) { 5014 AnyChange = true; 5015 break; 5016 } 5017 } 5018 5019 // No operands changed, just return the input node. 5020 if (!AnyChange) return N; 5021 5022 // See if the modified node already exists. 5023 void *InsertPos = 0; 5024 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos)) 5025 return Existing; 5026 5027 // Nope it doesn't. Remove the node from its current place in the maps. 5028 if (InsertPos) 5029 if (!RemoveNodeFromCSEMaps(N)) 5030 InsertPos = 0; 5031 5032 // Now we update the operands. 5033 for (unsigned i = 0; i != NumOps; ++i) 5034 if (N->OperandList[i] != Ops[i]) 5035 N->OperandList[i].set(Ops[i]); 5036 5037 // If this gets put into a CSE map, add it. 5038 if (InsertPos) CSEMap.InsertNode(N, InsertPos); 5039 return N; 5040 } 5041 5042 /// DropOperands - Release the operands and set this node to have 5043 /// zero operands. 5044 void SDNode::DropOperands() { 5045 // Unlike the code in MorphNodeTo that does this, we don't need to 5046 // watch for dead nodes here. 5047 for (op_iterator I = op_begin(), E = op_end(); I != E; ) { 5048 SDUse &Use = *I++; 5049 Use.set(SDValue()); 5050 } 5051 } 5052 5053 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a 5054 /// machine opcode. 
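/// For example, during instruction selection a target-independent node can be
/// rewritten in place into a machine instruction (the opcode and operand
/// names shown are hypothetical, for illustration only):
///   CurDAG->SelectNodeTo(N, MyTarget::ADDrr, MVT::i32, LHS, RHS);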
5055 /// 5056 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5057 EVT VT) { 5058 SDVTList VTs = getVTList(VT); 5059 return SelectNodeTo(N, MachineOpc, VTs, 0, 0); 5060 } 5061 5062 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5063 EVT VT, SDValue Op1) { 5064 SDVTList VTs = getVTList(VT); 5065 SDValue Ops[] = { Op1 }; 5066 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1); 5067 } 5068 5069 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5070 EVT VT, SDValue Op1, 5071 SDValue Op2) { 5072 SDVTList VTs = getVTList(VT); 5073 SDValue Ops[] = { Op1, Op2 }; 5074 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2); 5075 } 5076 5077 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5078 EVT VT, SDValue Op1, 5079 SDValue Op2, SDValue Op3) { 5080 SDVTList VTs = getVTList(VT); 5081 SDValue Ops[] = { Op1, Op2, Op3 }; 5082 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3); 5083 } 5084 5085 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5086 EVT VT, const SDValue *Ops, 5087 unsigned NumOps) { 5088 SDVTList VTs = getVTList(VT); 5089 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps); 5090 } 5091 5092 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5093 EVT VT1, EVT VT2, const SDValue *Ops, 5094 unsigned NumOps) { 5095 SDVTList VTs = getVTList(VT1, VT2); 5096 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps); 5097 } 5098 5099 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5100 EVT VT1, EVT VT2) { 5101 SDVTList VTs = getVTList(VT1, VT2); 5102 return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0); 5103 } 5104 5105 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5106 EVT VT1, EVT VT2, EVT VT3, 5107 const SDValue *Ops, unsigned NumOps) { 5108 SDVTList VTs = getVTList(VT1, VT2, VT3); 5109 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps); 5110 } 5111 5112 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5113 EVT VT1, EVT VT2, EVT VT3, EVT VT4, 5114 const SDValue *Ops, unsigned NumOps) { 5115 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4); 5116 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps); 5117 } 5118 5119 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5120 EVT VT1, EVT VT2, 5121 SDValue Op1) { 5122 SDVTList VTs = getVTList(VT1, VT2); 5123 SDValue Ops[] = { Op1 }; 5124 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1); 5125 } 5126 5127 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5128 EVT VT1, EVT VT2, 5129 SDValue Op1, SDValue Op2) { 5130 SDVTList VTs = getVTList(VT1, VT2); 5131 SDValue Ops[] = { Op1, Op2 }; 5132 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2); 5133 } 5134 5135 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5136 EVT VT1, EVT VT2, 5137 SDValue Op1, SDValue Op2, 5138 SDValue Op3) { 5139 SDVTList VTs = getVTList(VT1, VT2); 5140 SDValue Ops[] = { Op1, Op2, Op3 }; 5141 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3); 5142 } 5143 5144 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5145 EVT VT1, EVT VT2, EVT VT3, 5146 SDValue Op1, SDValue Op2, 5147 SDValue Op3) { 5148 SDVTList VTs = getVTList(VT1, VT2, VT3); 5149 SDValue Ops[] = { Op1, Op2, Op3 }; 5150 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3); 5151 } 5152 5153 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, 5154 SDVTList VTs, const SDValue *Ops, 5155 unsigned NumOps) { 5156 N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps); 5157 // Reset the NodeID to -1. 
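  // (A NodeID of -1 marks the node as newly created, so the instruction
  // selection machinery will not mistake it for an already-processed node.)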
5158   N->setNodeId(-1);
5159   return N;
5160 }
5161
5162 /// UpdadeSDLocOnMergedSDNode - If the opt level is -O0 then it throws away
5163 /// the line number information on the merged node since it is not possible to
5164 /// preserve the information that the operation is associated with multiple
5165 /// lines. This makes the debugger work better at -O0, where there is a higher
5166 /// probability of having other instructions associated with that line.
5167 ///
5168 /// For IROrder, we keep the smaller of the two.
5169 SDNode *SelectionDAG::UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc OLoc) {
5170   DebugLoc NLoc = N->getDebugLoc();
5171   if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) &&
5172       (OLoc.getDebugLoc() != NLoc)) {
5173     N->setDebugLoc(DebugLoc());
5174   }
5175   unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
5176   N->setIROrder(Order);
5177   return N;
5178 }
5179
5180 /// MorphNodeTo - This *mutates* the specified node to have the specified
5181 /// return type, opcode, and operands.
5182 ///
5183 /// Note that MorphNodeTo returns the resultant node. If there is already a
5184 /// node of the specified opcode and operands, it returns that node instead of
5185 /// the current one. Note that the SDLoc need not be the same.
5186 ///
5187 /// Using MorphNodeTo is faster than creating a new node and swapping it in
5188 /// with ReplaceAllUsesWith both because it often avoids allocating a new
5189 /// node, and because it doesn't require CSE recalculation for any of
5190 /// the node's users.
5191 ///
5192 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
5193                                   SDVTList VTs, const SDValue *Ops,
5194                                   unsigned NumOps) {
5195   // If an identical node already exists, use it.
5196   void *IP = 0;
5197   if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
5198     FoldingSetNodeID ID;
5199     AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
5200     if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
5201       return UpdadeSDLocOnMergedSDNode(ON, SDLoc(N));
5202   }
5203
5204   if (!RemoveNodeFromCSEMaps(N))
5205     IP = 0;
5206
5207   // Start the morphing.
5208   N->NodeType = Opc;
5209   N->ValueList = VTs.VTs;
5210   N->NumValues = VTs.NumVTs;
5211
5212   // Clear the operands list, updating used nodes to remove this from their
5213   // use list. Keep track of any operands that become dead as a result.
5214   SmallPtrSet<SDNode*, 16> DeadNodeSet;
5215   for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
5216     SDUse &Use = *I++;
5217     SDNode *Used = Use.getNode();
5218     Use.set(SDValue());
5219     if (Used->use_empty())
5220       DeadNodeSet.insert(Used);
5221   }
5222
5223   if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
5224     // Initialize the memory references information.
5225     MN->setMemRefs(0, 0);
5226     // If NumOps is larger than the # of operands we can have in a
5227     // MachineSDNode, reallocate the operand list.
5228     if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
5229       if (MN->OperandsNeedDelete)
5230         delete[] MN->OperandList;
5231       if (NumOps > array_lengthof(MN->LocalOperands))
5232         // We're creating a final node that will live unmorphed for the
5233         // remainder of the current SelectionDAG iteration, so we can allocate
5234         // the operands directly out of a pool with no recycling metadata.
5235 MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps), 5236 Ops, NumOps); 5237 else 5238 MN->InitOperands(MN->LocalOperands, Ops, NumOps); 5239 MN->OperandsNeedDelete = false; 5240 } else 5241 MN->InitOperands(MN->OperandList, Ops, NumOps); 5242 } else { 5243 // If NumOps is larger than the # of operands we currently have, reallocate 5244 // the operand list. 5245 if (NumOps > N->NumOperands) { 5246 if (N->OperandsNeedDelete) 5247 delete[] N->OperandList; 5248 N->InitOperands(new SDUse[NumOps], Ops, NumOps); 5249 N->OperandsNeedDelete = true; 5250 } else 5251 N->InitOperands(N->OperandList, Ops, NumOps); 5252 } 5253 5254 // Delete any nodes that are still dead after adding the uses for the 5255 // new operands. 5256 if (!DeadNodeSet.empty()) { 5257 SmallVector<SDNode *, 16> DeadNodes; 5258 for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(), 5259 E = DeadNodeSet.end(); I != E; ++I) 5260 if ((*I)->use_empty()) 5261 DeadNodes.push_back(*I); 5262 RemoveDeadNodes(DeadNodes); 5263 } 5264 5265 if (IP) 5266 CSEMap.InsertNode(N, IP); // Memoize the new node. 5267 return N; 5268 } 5269 5270 5271 /// getMachineNode - These are used for target selectors to create a new node 5272 /// with specified return type(s), MachineInstr opcode, and operands. 5273 /// 5274 /// Note that getMachineNode returns the resultant node. If there is already a 5275 /// node of the specified opcode and operands, it returns that node instead of 5276 /// the current one. 5277 MachineSDNode * 5278 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT) { 5279 SDVTList VTs = getVTList(VT); 5280 return getMachineNode(Opcode, dl, VTs, None); 5281 } 5282 5283 MachineSDNode * 5284 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, SDValue Op1) { 5285 SDVTList VTs = getVTList(VT); 5286 SDValue Ops[] = { Op1 }; 5287 return getMachineNode(Opcode, dl, VTs, Ops); 5288 } 5289 5290 MachineSDNode * 5291 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, 5292 SDValue Op1, SDValue Op2) { 5293 SDVTList VTs = getVTList(VT); 5294 SDValue Ops[] = { Op1, Op2 }; 5295 return getMachineNode(Opcode, dl, VTs, Ops); 5296 } 5297 5298 MachineSDNode * 5299 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, 5300 SDValue Op1, SDValue Op2, SDValue Op3) { 5301 SDVTList VTs = getVTList(VT); 5302 SDValue Ops[] = { Op1, Op2, Op3 }; 5303 return getMachineNode(Opcode, dl, VTs, Ops); 5304 } 5305 5306 MachineSDNode * 5307 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT, 5308 ArrayRef<SDValue> Ops) { 5309 SDVTList VTs = getVTList(VT); 5310 return getMachineNode(Opcode, dl, VTs, Ops); 5311 } 5312 5313 MachineSDNode * 5314 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2) { 5315 SDVTList VTs = getVTList(VT1, VT2); 5316 return getMachineNode(Opcode, dl, VTs, None); 5317 } 5318 5319 MachineSDNode * 5320 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5321 EVT VT1, EVT VT2, SDValue Op1) { 5322 SDVTList VTs = getVTList(VT1, VT2); 5323 SDValue Ops[] = { Op1 }; 5324 return getMachineNode(Opcode, dl, VTs, Ops); 5325 } 5326 5327 MachineSDNode * 5328 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5329 EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) { 5330 SDVTList VTs = getVTList(VT1, VT2); 5331 SDValue Ops[] = { Op1, Op2 }; 5332 return getMachineNode(Opcode, dl, VTs, Ops); 5333 } 5334 5335 MachineSDNode * 5336 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5337 EVT VT1, EVT VT2, SDValue Op1, 5338 SDValue Op2, SDValue Op3) { 5339 SDVTList VTs = 
getVTList(VT1, VT2); 5340 SDValue Ops[] = { Op1, Op2, Op3 }; 5341 return getMachineNode(Opcode, dl, VTs, Ops); 5342 } 5343 5344 MachineSDNode * 5345 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5346 EVT VT1, EVT VT2, 5347 ArrayRef<SDValue> Ops) { 5348 SDVTList VTs = getVTList(VT1, VT2); 5349 return getMachineNode(Opcode, dl, VTs, Ops); 5350 } 5351 5352 MachineSDNode * 5353 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5354 EVT VT1, EVT VT2, EVT VT3, 5355 SDValue Op1, SDValue Op2) { 5356 SDVTList VTs = getVTList(VT1, VT2, VT3); 5357 SDValue Ops[] = { Op1, Op2 }; 5358 return getMachineNode(Opcode, dl, VTs, Ops); 5359 } 5360 5361 MachineSDNode * 5362 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5363 EVT VT1, EVT VT2, EVT VT3, 5364 SDValue Op1, SDValue Op2, SDValue Op3) { 5365 SDVTList VTs = getVTList(VT1, VT2, VT3); 5366 SDValue Ops[] = { Op1, Op2, Op3 }; 5367 return getMachineNode(Opcode, dl, VTs, Ops); 5368 } 5369 5370 MachineSDNode * 5371 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5372 EVT VT1, EVT VT2, EVT VT3, 5373 ArrayRef<SDValue> Ops) { 5374 SDVTList VTs = getVTList(VT1, VT2, VT3); 5375 return getMachineNode(Opcode, dl, VTs, Ops); 5376 } 5377 5378 MachineSDNode * 5379 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, 5380 EVT VT2, EVT VT3, EVT VT4, 5381 ArrayRef<SDValue> Ops) { 5382 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4); 5383 return getMachineNode(Opcode, dl, VTs, Ops); 5384 } 5385 5386 MachineSDNode * 5387 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc dl, 5388 ArrayRef<EVT> ResultTys, 5389 ArrayRef<SDValue> Ops) { 5390 SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size()); 5391 return getMachineNode(Opcode, dl, VTs, Ops); 5392 } 5393 5394 MachineSDNode * 5395 SelectionDAG::getMachineNode(unsigned Opcode, SDLoc DL, SDVTList VTs, 5396 ArrayRef<SDValue> OpsArray) { 5397 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue; 5398 MachineSDNode *N; 5399 void *IP = 0; 5400 const SDValue *Ops = OpsArray.data(); 5401 unsigned NumOps = OpsArray.size(); 5402 5403 if (DoCSE) { 5404 FoldingSetNodeID ID; 5405 AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps); 5406 IP = 0; 5407 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { 5408 return cast<MachineSDNode>(UpdadeSDLocOnMergedSDNode(E, DL)); 5409 } 5410 } 5411 5412 // Allocate a new MachineSDNode. 5413 N = new (NodeAllocator) MachineSDNode(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs); 5414 5415 // Initialize the operands list. 5416 if (NumOps > array_lengthof(N->LocalOperands)) 5417 // We're creating a final node that will live unmorphed for the 5418 // remainder of the current SelectionDAG iteration, so we can allocate 5419 // the operands directly out of a pool with no recycling metadata. 5420 N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps), 5421 Ops, NumOps); 5422 else 5423 N->InitOperands(N->LocalOperands, Ops, NumOps); 5424 N->OperandsNeedDelete = false; 5425 5426 if (DoCSE) 5427 CSEMap.InsertNode(N, IP); 5428 5429 AllNodes.push_back(N); 5430 #ifndef NDEBUG 5431 VerifyMachineNode(N); 5432 #endif 5433 return N; 5434 } 5435 5436 /// getTargetExtractSubreg - A convenience function for creating 5437 /// TargetOpcode::EXTRACT_SUBREG nodes. 
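/// For example, to take the low 32 bits of a 64-bit value as an i32
/// (subregister indices are target-defined; 'sub_lo32' is a made-up name,
/// for illustration only):
///   SDValue Lo = DAG.getTargetExtractSubreg(MyTarget::sub_lo32, DL,
///                                           MVT::i32, Val64);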
5438 SDValue
5439 SelectionDAG::getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
5440                                      SDValue Operand) {
5441   SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5442   SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
5443                                   VT, Operand, SRIdxVal);
5444   return SDValue(Subreg, 0);
5445 }
5446
5447 /// getTargetInsertSubreg - A convenience function for creating
5448 /// TargetOpcode::INSERT_SUBREG nodes.
5449 SDValue
5450 SelectionDAG::getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
5451                                     SDValue Operand, SDValue Subreg) {
5452   SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5453   SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
5454                                   VT, Operand, Subreg, SRIdxVal);
5455   return SDValue(Result, 0);
5456 }
5457
5458 /// getNodeIfExists - Get the specified node if it's already available, or
5459 /// else return NULL.
5460 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
5461                                       const SDValue *Ops, unsigned NumOps) {
5462   if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5463     FoldingSetNodeID ID;
5464     AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
5465     void *IP = 0;
5466     if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5467       return E;
5468   }
5469   return NULL;
5470 }
5471
5472 /// getDbgValue - Creates an SDDbgValue node.
5473 ///
5474 SDDbgValue *
5475 SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
5476                           DebugLoc DL, unsigned O) {
5477   return new (Allocator) SDDbgValue(MDPtr, N, R, Off, DL, O);
5478 }
5479
5480 SDDbgValue *
5481 SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
5482                           DebugLoc DL, unsigned O) {
5483   return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
5484 }
5485
5486 SDDbgValue *
5487 SelectionDAG::getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
5488                           DebugLoc DL, unsigned O) {
5489   return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
5490 }
5491
5492 namespace {
5493
5494 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
5495 /// pointed to by a use iterator is deleted, increment the use iterator
5496 /// so that it doesn't dangle.
5497 ///
5498 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
5499   SDNode::use_iterator &UI;
5500   SDNode::use_iterator &UE;
5501
5502   virtual void NodeDeleted(SDNode *N, SDNode *E) {
5503     // Increment the iterator as needed.
5504     while (UI != UE && N == *UI)
5505       ++UI;
5506   }
5507
5508 public:
5509   RAUWUpdateListener(SelectionDAG &d,
5510                      SDNode::use_iterator &ui,
5511                      SDNode::use_iterator &ue)
5512     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
5513 };
5514
5515 }
5516
5517 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5518 /// This can cause recursive merging of nodes in the DAG.
5519 ///
5520 /// This version assumes From has a single result value.
5521 ///
5522 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
5523   SDNode *From = FromN.getNode();
5524   assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
5525          "Cannot replace with this method!");
5526   assert(From != To.getNode() && "Cannot replace a value's uses with itself");
5527
5528   // Iterate over all the existing uses of From. New uses will be added
5529   // to the beginning of the use list, which we avoid visiting.
5530   // This specifically avoids visiting uses of From that arise while the
5531   // replacement is happening, because any such uses would be the result
5532   // of CSE: If an existing node looks like From after one of its operands
5533   // is replaced by To, we don't want to replace all of its users with To
5534   // too. See PR3018 for more info.
5535   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5536   RAUWUpdateListener Listener(*this, UI, UE);
5537   while (UI != UE) {
5538     SDNode *User = *UI;
5539
5540     // This node is about to morph, remove its old self from the CSE maps.
5541     RemoveNodeFromCSEMaps(User);
5542
5543     // A user can appear in a use list multiple times, and when this
5544     // happens the uses are usually next to each other in the list.
5545     // To help reduce the number of CSE recomputations, process all
5546     // the uses of this user that we can find this way.
5547     do {
5548       SDUse &Use = UI.getUse();
5549       ++UI;
5550       Use.set(To);
5551     } while (UI != UE && *UI == User);
5552
5553     // Now that we have modified User, add it back to the CSE maps. If it
5554     // already exists there, recursively merge the results together.
5555     AddModifiedNodeToCSEMaps(User);
5556   }
5557
5558   // If we just RAUW'd the root, take note.
5559   if (FromN == getRoot())
5560     setRoot(To);
5561 }
5562
5563 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5564 /// This can cause recursive merging of nodes in the DAG.
5565 ///
5566 /// This version assumes that for each value of From, there is a
5567 /// corresponding value in To in the same position with the same type.
5568 ///
5569 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
5570 #ifndef NDEBUG
5571   for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
5572     assert((!From->hasAnyUseOfValue(i) ||
5573             From->getValueType(i) == To->getValueType(i)) &&
5574            "Cannot use this version of ReplaceAllUsesWith!");
5575 #endif
5576
5577   // Handle the trivial case.
5578   if (From == To)
5579     return;
5580
5581   // Iterate over just the existing users of From. See the comments in
5582   // the ReplaceAllUsesWith above.
5583   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5584   RAUWUpdateListener Listener(*this, UI, UE);
5585   while (UI != UE) {
5586     SDNode *User = *UI;
5587
5588     // This node is about to morph, remove its old self from the CSE maps.
5589     RemoveNodeFromCSEMaps(User);
5590
5591     // A user can appear in a use list multiple times, and when this
5592     // happens the uses are usually next to each other in the list.
5593     // To help reduce the number of CSE recomputations, process all
5594     // the uses of this user that we can find this way.
5595     do {
5596       SDUse &Use = UI.getUse();
5597       ++UI;
5598       Use.setNode(To);
5599     } while (UI != UE && *UI == User);
5600
5601     // Now that we have modified User, add it back to the CSE maps. If it
5602     // already exists there, recursively merge the results together.
5603     AddModifiedNodeToCSEMaps(User);
5604   }
5605
5606   // If we just RAUW'd the root, take note.
5607   if (From == getRoot().getNode())
5608     setRoot(SDValue(To, getRoot().getResNo()));
5609 }
5610
5611 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5612 /// This can cause recursive merging of nodes in the DAG.
5613 ///
5614 /// This version can replace From with any result values. To must match the
5615 /// number and types of values returned by From.
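/// For example, both results of a load (the loaded value and the output
/// chain) can be replaced at once (names are illustrative):
///   SDValue NewResults[] = { NewValue, NewChain };
///   DAG.ReplaceAllUsesWith(OldLoadNode, NewResults);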
5616 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
5617   if (From->getNumValues() == 1)  // Handle the simple case efficiently.
5618     return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
5619
5620   // Iterate over just the existing users of From. See the comments in
5621   // the ReplaceAllUsesWith above.
5622   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5623   RAUWUpdateListener Listener(*this, UI, UE);
5624   while (UI != UE) {
5625     SDNode *User = *UI;
5626
5627     // This node is about to morph, remove its old self from the CSE maps.
5628     RemoveNodeFromCSEMaps(User);
5629
5630     // A user can appear in a use list multiple times, and when this
5631     // happens the uses are usually next to each other in the list.
5632     // To help reduce the number of CSE recomputations, process all
5633     // the uses of this user that we can find this way.
5634     do {
5635       SDUse &Use = UI.getUse();
5636       const SDValue &ToOp = To[Use.getResNo()];
5637       ++UI;
5638       Use.set(ToOp);
5639     } while (UI != UE && *UI == User);
5640
5641     // Now that we have modified User, add it back to the CSE maps. If it
5642     // already exists there, recursively merge the results together.
5643     AddModifiedNodeToCSEMaps(User);
5644   }
5645
5646   // If we just RAUW'd the root, take note.
5647   if (From == getRoot().getNode())
5648     setRoot(SDValue(To[getRoot().getResNo()]));
5649 }
5650
5651 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
5652 /// uses of other values produced by From.getNode() alone.
5653 ///
5654 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
5655   // Handle the really simple, really trivial case efficiently.
5656   if (From == To) return;
5657
5658   // Handle the simple, trivial case efficiently.
5659   if (From.getNode()->getNumValues() == 1) {
5660     ReplaceAllUsesWith(From, To);
5661     return;
5662   }
5663
5664   // Iterate over just the existing users of From. See the comments in
5665   // the ReplaceAllUsesWith above.
5666   SDNode::use_iterator UI = From.getNode()->use_begin(),
5667                        UE = From.getNode()->use_end();
5668   RAUWUpdateListener Listener(*this, UI, UE);
5669   while (UI != UE) {
5670     SDNode *User = *UI;
5671     bool UserRemovedFromCSEMaps = false;
5672
5673     // A user can appear in a use list multiple times, and when this
5674     // happens the uses are usually next to each other in the list.
5675     // To help reduce the number of CSE recomputations, process all
5676     // the uses of this user that we can find this way.
5677     do {
5678       SDUse &Use = UI.getUse();
5679
5680       // Skip uses of different values from the same node.
5681       if (Use.getResNo() != From.getResNo()) {
5682         ++UI;
5683         continue;
5684       }
5685
5686       // If this node hasn't been modified yet, it's still in the CSE maps,
5687       // so remove its old self from the CSE maps.
5688       if (!UserRemovedFromCSEMaps) {
5689         RemoveNodeFromCSEMaps(User);
5690         UserRemovedFromCSEMaps = true;
5691       }
5692
5693       ++UI;
5694       Use.set(To);
5695     } while (UI != UE && *UI == User);
5696
5697     // We are iterating over all uses of the From node, so if a use
5698     // doesn't use the specific value, no changes are made.
5699     if (!UserRemovedFromCSEMaps)
5700       continue;
5701
5702     // Now that we have modified User, add it back to the CSE maps. If it
5703     // already exists there, recursively merge the results together.
5704     AddModifiedNodeToCSEMaps(User);
5705   }
5706
5707   // If we just RAUW'd the root, take note.
5708   if (From == getRoot())
5709     setRoot(To);
5710 }
5711
5712 namespace {
5713   /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
5714   /// to record information about a use.
5715   struct UseMemo {
5716     SDNode *User;
5717     unsigned Index;
5718     SDUse *Use;
5719   };
5720
5721   /// operator< - Sort Memos by User.
5722   bool operator<(const UseMemo &L, const UseMemo &R) {
5723     return (intptr_t)L.User < (intptr_t)R.User;
5724   }
5725 }
5726
5727 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
5728 /// uses of other values produced by From.getNode() alone. The same value
5729 /// may appear in both the From and To list.
5730 ///
5731 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
5732                                               const SDValue *To,
5733                                               unsigned Num) {
5734   // Handle the simple, trivial case efficiently.
5735   if (Num == 1)
5736     return ReplaceAllUsesOfValueWith(*From, *To);
5737
5738   // Record all the uses up front; this protects the iteration below
5739   // from new uses that are introduced during the
5740   // replacement process.
5741   SmallVector<UseMemo, 4> Uses;
5742   for (unsigned i = 0; i != Num; ++i) {
5743     unsigned FromResNo = From[i].getResNo();
5744     SDNode *FromNode = From[i].getNode();
5745     for (SDNode::use_iterator UI = FromNode->use_begin(),
5746          E = FromNode->use_end(); UI != E; ++UI) {
5747       SDUse &Use = UI.getUse();
5748       if (Use.getResNo() == FromResNo) {
5749         UseMemo Memo = { *UI, i, &Use };
5750         Uses.push_back(Memo);
5751       }
5752     }
5753   }
5754
5755   // Sort the uses, so that all the uses from a given User are together.
5756   std::sort(Uses.begin(), Uses.end());
5757
5758   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
5759        UseIndex != UseIndexEnd; ) {
5760     // We know that this user uses some value of From. If it is the right
5761     // value, update it.
5762     SDNode *User = Uses[UseIndex].User;
5763
5764     // This node is about to morph, remove its old self from the CSE maps.
5765     RemoveNodeFromCSEMaps(User);
5766
5767     // The Uses array is sorted, so all the uses for a given User
5768     // are next to each other in the list.
5769     // To help reduce the number of CSE recomputations, process all
5770     // the uses of this user that we can find this way.
5771     do {
5772       unsigned i = Uses[UseIndex].Index;
5773       SDUse &Use = *Uses[UseIndex].Use;
5774       ++UseIndex;
5775
5776       Use.set(To[i]);
5777     } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
5778
5779     // Now that we have modified User, add it back to the CSE maps. If it
5780     // already exists there, recursively merge the results together.
5781     AddModifiedNodeToCSEMaps(User);
5782   }
5783 }
5784
5785 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
5786 /// based on their topological order. It returns the number of nodes; on
5787 /// return, the AllNodes list holds the nodes in topological order.
5788 unsigned SelectionDAG::AssignTopologicalOrder() {
5789
5790   unsigned DAGSize = 0;
5791
5792   // SortedPos tracks the progress of the algorithm. Nodes before it are
5793   // sorted, nodes after it are unsorted. When the algorithm completes
5794   // it is at the end of the list.
5795   allnodes_iterator SortedPos = allnodes_begin();
5796
5797   // Visit all the nodes. Move nodes with no operands to the front of
5798   // the list immediately. Annotate nodes that do have operands with their
5799   // operand count. Before we do this, the Node Id fields of the nodes
5800   // may contain arbitrary values. After, the Node Id fields for nodes
5801   // before SortedPos will contain the topological sort index, and the
5802   // Node Id fields for nodes at SortedPos and after will contain the
5803   // count of outstanding operands.
5804   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
5805     SDNode *N = I++;
5806     checkForCycles(N);
5807     unsigned Degree = N->getNumOperands();
5808     if (Degree == 0) {
5809       // A node with no operands; add it to the result array immediately.
5810       N->setNodeId(DAGSize++);
5811       allnodes_iterator Q = N;
5812       if (Q != SortedPos)
5813         SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
5814       assert(SortedPos != AllNodes.end() && "Overran node list");
5815       ++SortedPos;
5816     } else {
5817       // Temporarily use the Node Id as scratch space for the degree count.
5818       N->setNodeId(Degree);
5819     }
5820   }
5821
5822   // Visit all the nodes. As we iterate, move nodes into sorted order,
5823   // such that by the time the end is reached all nodes will be sorted.
5824   for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I) {
5825     SDNode *N = I;
5826     checkForCycles(N);
5827     // N is in sorted position, so all its uses have one less operand
5828     // that needs to be sorted.
5829     for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
5830          UI != UE; ++UI) {
5831       SDNode *P = *UI;
5832       unsigned Degree = P->getNodeId();
5833       assert(Degree != 0 && "Invalid node degree");
5834       --Degree;
5835       if (Degree == 0) {
5836         // All of P's operands are sorted, so P may be sorted now.
5837         P->setNodeId(DAGSize++);
5838         if (P != SortedPos)
5839           SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
5840         assert(SortedPos != AllNodes.end() && "Overran node list");
5841         ++SortedPos;
5842       } else {
5843         // Update P's outstanding operand count.
5844         P->setNodeId(Degree);
5845       }
5846     }
5847     if (I == SortedPos) {
5848 #ifndef NDEBUG
5849       SDNode *S = ++I;
5850       dbgs() << "Overran sorted position:\n";
5851       S->dumprFull();
5852 #endif
5853       llvm_unreachable(0);
5854     }
5855   }
5856
5857   assert(SortedPos == AllNodes.end() &&
5858          "Topological sort incomplete!");
5859   assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
5860          "First node in topological sort is not the entry token!");
5861   assert(AllNodes.front().getNodeId() == 0 &&
5862          "First node in topological sort has non-zero id!");
5863   assert(AllNodes.front().getNumOperands() == 0 &&
5864          "First node in topological sort has operands!");
5865   assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
5866          "Last node in topological sort has unexpected id!");
5867   assert(AllNodes.back().use_empty() &&
5868          "Last node in topological sort has users!");
5869   assert(DAGSize == allnodes_size() && "Node count mismatch!");
5870   return DAGSize;
5871 }
5872
5873 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
5874 /// value is produced by SD.
5875 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
5876   DbgInfo->add(DB, SD, isParameter);
5877   if (SD)
5878     SD->setHasDebugValue(true);
5879 }
5880
5881 /// TransferDbgValues - Transfer SDDbgValues.
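/// Debug values of SDNODE kind attached to From are cloned onto To so that
/// source-variable locations survive when one value is replaced by another
/// during combining or legalization.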
5882 void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) { 5883 if (From == To || !From.getNode()->getHasDebugValue()) 5884 return; 5885 SDNode *FromNode = From.getNode(); 5886 SDNode *ToNode = To.getNode(); 5887 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode); 5888 SmallVector<SDDbgValue *, 2> ClonedDVs; 5889 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end(); 5890 I != E; ++I) { 5891 SDDbgValue *Dbg = *I; 5892 if (Dbg->getKind() == SDDbgValue::SDNODE) { 5893 SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(), 5894 Dbg->getOffset(), Dbg->getDebugLoc(), 5895 Dbg->getOrder()); 5896 ClonedDVs.push_back(Clone); 5897 } 5898 } 5899 for (SmallVectorImpl<SDDbgValue *>::iterator I = ClonedDVs.begin(), 5900 E = ClonedDVs.end(); I != E; ++I) 5901 AddDbgValue(*I, ToNode, false); 5902 } 5903 5904 //===----------------------------------------------------------------------===// 5905 // SDNode Class 5906 //===----------------------------------------------------------------------===// 5907 5908 HandleSDNode::~HandleSDNode() { 5909 DropOperands(); 5910 } 5911 5912 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order, 5913 DebugLoc DL, const GlobalValue *GA, 5914 EVT VT, int64_t o, unsigned char TF) 5915 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) { 5916 TheGlobal = GA; 5917 } 5918 5919 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs, 5920 EVT memvt, MachineMemOperand *mmo) 5921 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) { 5922 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(), 5923 MMO->isNonTemporal(), MMO->isInvariant()); 5924 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!"); 5925 assert(isNonTemporal() == MMO->isNonTemporal() && 5926 "Non-temporal encoding error!"); 5927 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!"); 5928 } 5929 5930 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs, 5931 const SDValue *Ops, unsigned NumOps, EVT memvt, 5932 MachineMemOperand *mmo) 5933 : SDNode(Opc, Order, dl, VTs, Ops, NumOps), 5934 MemoryVT(memvt), MMO(mmo) { 5935 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(), 5936 MMO->isNonTemporal(), MMO->isInvariant()); 5937 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!"); 5938 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!"); 5939 } 5940 5941 /// Profile - Gather unique data for the node. 5942 /// 5943 void SDNode::Profile(FoldingSetNodeID &ID) const { 5944 AddNodeIDNode(ID, this); 5945 } 5946 5947 namespace { 5948 struct EVTArray { 5949 std::vector<EVT> VTs; 5950 5951 EVTArray() { 5952 VTs.reserve(MVT::LAST_VALUETYPE); 5953 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) 5954 VTs.push_back(MVT((MVT::SimpleValueType)i)); 5955 } 5956 }; 5957 } 5958 5959 static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs; 5960 static ManagedStatic<EVTArray> SimpleVTArray; 5961 static ManagedStatic<sys::SmartMutex<true> > VTMutex; 5962 5963 /// getValueTypeList - Return a pointer to the specified value type. 
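/// The returned pointer must remain stable: simple types are served from a
/// lazily built static table, and extended types are interned into a
/// mutex-guarded global set.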
5964 ///
5965 const EVT *SDNode::getValueTypeList(EVT VT) {
5966   if (VT.isExtended()) {
5967     sys::SmartScopedLock<true> Lock(*VTMutex);
5968     return &(*EVTs->insert(VT).first);
5969   } else {
5970     assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
5971            "Value type out of range!");
5972     return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
5973   }
5974 }
5975
5976 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
5977 /// indicated value. This method ignores uses of other values defined by this
5978 /// operation.
5979 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
5980   assert(Value < getNumValues() && "Bad value!");
5981
5982   // TODO: Only iterate over uses of a given value of the node
5983   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
5984     if (UI.getUse().getResNo() == Value) {
5985       if (NUses == 0)
5986         return false;
5987       --NUses;
5988     }
5989   }
5990
5991   // Found exactly the right number of uses?
5992   return NUses == 0;
5993 }
5994
5995
5996 /// hasAnyUseOfValue - Return true if there are any uses of the indicated
5997 /// value. This method ignores uses of other values defined by this operation.
5998 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
5999   assert(Value < getNumValues() && "Bad value!");
6000
6001   for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
6002     if (UI.getUse().getResNo() == Value)
6003       return true;
6004
6005   return false;
6006 }
6007
6008
6009 /// isOnlyUserOf - Return true if this node is the only use of N.
6010 ///
6011 bool SDNode::isOnlyUserOf(SDNode *N) const {
6012   bool Seen = false;
6013   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
6014     SDNode *User = *I;
6015     if (User == this)
6016       Seen = true;
6017     else
6018       return false;
6019   }
6020
6021   return Seen;
6022 }
6023
6024 /// isOperandOf - Return true if this node is an operand of N.
6025 ///
6026 bool SDValue::isOperandOf(SDNode *N) const {
6027   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6028     if (*this == N->getOperand(i))
6029       return true;
6030   return false;
6031 }
6032
6033 bool SDNode::isOperandOf(SDNode *N) const {
6034   for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
6035     if (this == N->OperandList[i].getNode())
6036       return true;
6037   return false;
6038 }
6039
6040 /// reachesChainWithoutSideEffects - Return true if this operand (which must
6041 /// be a chain) reaches the specified operand without crossing any
6042 /// side-effecting instructions on any chain path. In practice, this looks
6043 /// through token factors and non-volatile loads. In order to remain efficient,
6044 /// this only looks a couple of nodes in; it does not do an exhaustive search.
6045 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
6046                                              unsigned Depth) const {
6047   if (*this == Dest) return true;
6048
6049   // Don't search too deeply; we just want to be able to see through
6050   // TokenFactors, etc.
6051   if (Depth == 0) return false;
6052
6053   // If this is a token factor, all inputs to the TF happen in parallel. If any
6054   // of the operands of the TF does not reach dest, then we cannot do the xform.
6055   if (getOpcode() == ISD::TokenFactor) {
6056     for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6057       if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
6058         return false;
6059     return true;
6060   }
6061
6062   // Loads don't have side effects; look through them.
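  // (Volatile loads are deliberately excluded: the check below treats them
  // as side-effecting and does not look through them.)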
6063   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
6064     if (!Ld->isVolatile())
6065       return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
6066   }
6067   return false;
6068 }
6069
6070 /// hasPredecessor - Return true if N is a predecessor of this node.
6071 /// N is either an operand of this node, or can be reached by recursively
6072 /// traversing up the operands.
6073 /// NOTE: This is an expensive method. Use it carefully.
6074 bool SDNode::hasPredecessor(const SDNode *N) const {
6075   SmallPtrSet<const SDNode *, 32> Visited;
6076   SmallVector<const SDNode *, 16> Worklist;
6077   return hasPredecessorHelper(N, Visited, Worklist);
6078 }
6079
6080 bool
6081 SDNode::hasPredecessorHelper(const SDNode *N,
6082                              SmallPtrSet<const SDNode *, 32> &Visited,
6083                              SmallVectorImpl<const SDNode *> &Worklist) const {
6084   if (Visited.empty()) {
6085     Worklist.push_back(this);
6086   } else {
6087     // Take a look in the visited set. If we've already encountered this node,
6088     // we needn't search further.
6089     if (Visited.count(N))
6090       return true;
6091   }
6092
6093   // Haven't visited N yet. Continue the search.
6094   while (!Worklist.empty()) {
6095     const SDNode *M = Worklist.pop_back_val();
6096     for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
6097       SDNode *Op = M->getOperand(i).getNode();
6098       if (Visited.insert(Op))
6099         Worklist.push_back(Op);
6100       if (Op == N)
6101         return true;
6102     }
6103   }
6104
6105   return false;
6106 }
6107
6108 uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
6109   assert(Num < NumOperands && "Invalid child # of SDNode!");
6110   return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
6111 }
6112
6113 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
6114   assert(N->getNumValues() == 1 &&
6115          "Can't unroll a vector with multiple results!");
6116
6117   EVT VT = N->getValueType(0);
6118   unsigned NE = VT.getVectorNumElements();
6119   EVT EltVT = VT.getVectorElementType();
6120   SDLoc dl(N);
6121
6122   SmallVector<SDValue, 8> Scalars;
6123   SmallVector<SDValue, 4> Operands(N->getNumOperands());
6124
6125   // If ResNE is 0, fully unroll the vector op.
6126   if (ResNE == 0)
6127     ResNE = NE;
6128   else if (NE > ResNE)
6129     NE = ResNE;
6130
6131   unsigned i;
6132   for (i = 0; i != NE; ++i) {
6133     for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
6134       SDValue Operand = N->getOperand(j);
6135       EVT OperandVT = Operand.getValueType();
6136       if (OperandVT.isVector()) {
6137         // A vector operand; extract a single element.
6138         const TargetLowering *TLI = TM.getTargetLowering();
6139         EVT OperandEltVT = OperandVT.getVectorElementType();
6140         Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
6141                               OperandEltVT,
6142                               Operand,
6143                               getConstant(i, TLI->getVectorIdxTy()));
6144       } else {
6145         // A scalar operand; just use it as is.
6146 Operands[j] = Operand; 6147 } 6148 } 6149 6150 switch (N->getOpcode()) { 6151 default: 6152 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 6153 &Operands[0], Operands.size())); 6154 break; 6155 case ISD::VSELECT: 6156 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, 6157 &Operands[0], Operands.size())); 6158 break; 6159 case ISD::SHL: 6160 case ISD::SRA: 6161 case ISD::SRL: 6162 case ISD::ROTL: 6163 case ISD::ROTR: 6164 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0], 6165 getShiftAmountOperand(Operands[0].getValueType(), 6166 Operands[1]))); 6167 break; 6168 case ISD::SIGN_EXTEND_INREG: 6169 case ISD::FP_ROUND_INREG: { 6170 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType(); 6171 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, 6172 Operands[0], 6173 getValueType(ExtVT))); 6174 } 6175 } 6176 } 6177 6178 for (; i < ResNE; ++i) 6179 Scalars.push_back(getUNDEF(EltVT)); 6180 6181 return getNode(ISD::BUILD_VECTOR, dl, 6182 EVT::getVectorVT(*getContext(), EltVT, ResNE), 6183 &Scalars[0], Scalars.size()); 6184 } 6185 6186 6187 /// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a 6188 /// location that is 'Dist' units away from the location that the 'Base' load 6189 /// is loading from. 6190 bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base, 6191 unsigned Bytes, int Dist) const { 6192 if (LD->getChain() != Base->getChain()) 6193 return false; 6194 EVT VT = LD->getValueType(0); 6195 if (VT.getSizeInBits() / 8 != Bytes) 6196 return false; 6197 6198 SDValue Loc = LD->getOperand(1); 6199 SDValue BaseLoc = Base->getOperand(1); 6200 if (Loc.getOpcode() == ISD::FrameIndex) { 6201 if (BaseLoc.getOpcode() != ISD::FrameIndex) 6202 return false; 6203 const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo(); 6204 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 6205 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 6206 int FS = MFI->getObjectSize(FI); 6207 int BFS = MFI->getObjectSize(BFI); 6208 if (FS != BFS || FS != (int)Bytes) return false; 6209 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes); 6210 } 6211 6212 // Handle X+C 6213 if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc && 6214 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes) 6215 return true; 6216 6217 const GlobalValue *GV1 = NULL; 6218 const GlobalValue *GV2 = NULL; 6219 int64_t Offset1 = 0; 6220 int64_t Offset2 = 0; 6221 const TargetLowering *TLI = TM.getTargetLowering(); 6222 bool isGA1 = TLI->isGAPlusOffset(Loc.getNode(), GV1, Offset1); 6223 bool isGA2 = TLI->isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 6224 if (isGA1 && isGA2 && GV1 == GV2) 6225 return Offset1 == (Offset2 + Dist*Bytes); 6226 return false; 6227 } 6228 6229 6230 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if 6231 /// it cannot be inferred. 6232 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const { 6233 // If this is a GlobalAddress + cst, return the alignment. 6234 const GlobalValue *GV; 6235 int64_t GVOffset = 0; 6236 const TargetLowering *TLI = TM.getTargetLowering(); 6237 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) { 6238 unsigned PtrWidth = TLI->getPointerTy().getSizeInBits(); 6239 APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0); 6240 llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne, 6241 TLI->getDataLayout()); 6242 unsigned AlignBits = KnownZero.countTrailingOnes(); 6243 unsigned Align = AlignBits ? 
1 << std::min(31U, AlignBits) : 0;
6244     if (Align)
6245       return MinAlign(Align, GVOffset);
6246   }
6247
6248   // If this is a direct reference to a stack slot, use information about the
6249   // stack slot's alignment.
6250   int FrameIdx = 1 << 31;
6251   int64_t FrameOffset = 0;
6252   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
6253     FrameIdx = FI->getIndex();
6254   } else if (isBaseWithConstantOffset(Ptr) &&
6255              isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6256     // Handle FI+Cst
6257     FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6258     FrameOffset = Ptr.getConstantOperandVal(1);
6259   }
6260
6261   if (FrameIdx != (1 << 31)) {
6262     const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
6263     unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
6264                                     FrameOffset);
6265     return FIInfoAlign;
6266   }
6267
6268   return 0;
6269 }
6270
6271 // getAddressSpace - Return the address space this GlobalAddress belongs to.
6272 unsigned GlobalAddressSDNode::getAddressSpace() const {
6273   return getGlobal()->getType()->getAddressSpace();
6274 }
6275
6276
6277 Type *ConstantPoolSDNode::getType() const {
6278   if (isMachineConstantPoolEntry())
6279     return Val.MachineCPVal->getType();
6280   return Val.ConstVal->getType();
6281 }
6282
6283 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
6284                                         APInt &SplatUndef,
6285                                         unsigned &SplatBitSize,
6286                                         bool &HasAnyUndefs,
6287                                         unsigned MinSplatBits,
6288                                         bool isBigEndian) {
6289   EVT VT = getValueType(0);
6290   assert(VT.isVector() && "Expected a vector type");
6291   unsigned sz = VT.getSizeInBits();
6292   if (MinSplatBits > sz)
6293     return false;
6294
6295   SplatValue = APInt(sz, 0);
6296   SplatUndef = APInt(sz, 0);
6297
6298   // Get the bits. Bits with undefined values (when the corresponding element
6299   // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
6300   // in SplatValue. If any of the values are not constant, give up and return
6301   // false.
6302   unsigned nOps = getNumOperands();
6303   assert(nOps > 0 && "isConstantSplat has 0-size build vector");
6304   unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
6305
6306   for (unsigned j = 0; j < nOps; ++j) {
6307     unsigned i = isBigEndian ? nOps-1-j : j;
6308     SDValue OpVal = getOperand(i);
6309     unsigned BitPos = j * EltBitSize;
6310
6311     if (OpVal.getOpcode() == ISD::UNDEF)
6312       SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
6313     else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
6314       SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
6315                     zextOrTrunc(sz) << BitPos;
6316     else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
6317       SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) << BitPos;
6318     else
6319       return false;
6320   }
6321
6322   // The build_vector is all constants or undefs. Find the smallest element
6323   // size that splats the vector.
6324
6325   HasAnyUndefs = (SplatUndef != 0);
6326   while (sz > 8) {
6327
6328     unsigned HalfSize = sz / 2;
6329     APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
6330     APInt LowValue = SplatValue.trunc(HalfSize);
6331     APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
6332     APInt LowUndef = SplatUndef.trunc(HalfSize);
6333
6334     // If the two halves do not match (ignoring undef bits), stop here.
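    // For example, a 32-bit splat value of 0x01010101 halves to 0x0101 and
    // then to 0x01, so it is reported as an 8-bit splat.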
6335     if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
6336         MinSplatBits > HalfSize)
6337       break;
6338
6339     SplatValue = HighValue | LowValue;
6340     SplatUndef = HighUndef & LowUndef;
6341
6342     sz = HalfSize;
6343   }
6344
6345   SplatBitSize = sz;
6346   return true;
6347 }
6348
6349 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
6350   // Find the first non-undef value in the shuffle mask.
6351   unsigned i, e;
6352   for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
6353     /* search */;
6354
6355   assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
6356
6357   // Make sure all remaining elements are either undef or the same as the first
6358   // non-undef value.
6359   for (int Idx = Mask[i]; i != e; ++i)
6360     if (Mask[i] >= 0 && Mask[i] != Idx)
6361       return false;
6362   return true;
6363 }
6364
6365 #ifdef XDEBUG
6366 static void checkForCyclesHelper(const SDNode *N,
6367                                  SmallPtrSet<const SDNode*, 32> &Visited,
6368                                  SmallPtrSet<const SDNode*, 32> &Checked) {
6369   // If this node has already been checked, don't check it again.
6370   if (Checked.count(N))
6371     return;
6372
6373   // If a node has already been visited on this depth-first walk, reject it as
6374   // a cycle.
6375   if (!Visited.insert(N)) {
6376     dbgs() << "Offending node:\n";
6377     N->dumprFull();
6378     errs() << "Detected cycle in SelectionDAG\n";
6379     abort();
6380   }
6381
6382   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6383     checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);
6384
6385   Checked.insert(N);
6386   Visited.erase(N);
6387 }
6388 #endif
6389
6390 void llvm::checkForCycles(const llvm::SDNode *N) {
6391 #ifdef XDEBUG
6392   assert(N && "Checking nonexistent SDNode");
6393   SmallPtrSet<const SDNode*, 32> visited;
6394   SmallPtrSet<const SDNode*, 32> checked;
6395   checkForCyclesHelper(N, visited, checked);
6396 #endif
6397 }
6398
6399 void llvm::checkForCycles(const llvm::SelectionDAG *DAG) {
6400   checkForCycles(DAG->getRoot().getNode());
6401 }
6402