//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "xcore-lower"

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch ((XCoreISD::NodeType)Opcode)
  {
  case XCoreISD::FIRST_NUMBER         : break;
  case XCoreISD::BL                   : return "XCoreISD::BL";
  case XCoreISD::PCRelativeWrapper    : return "XCoreISD::PCRelativeWrapper";
  case XCoreISD::DPRelativeWrapper    : return "XCoreISD::DPRelativeWrapper";
  case XCoreISD::CPRelativeWrapper    : return "XCoreISD::CPRelativeWrapper";
  case XCoreISD::LDWSP                : return "XCoreISD::LDWSP";
  case XCoreISD::STWSP                : return "XCoreISD::STWSP";
  case XCoreISD::RETSP                : return "XCoreISD::RETSP";
  case XCoreISD::LADD                 : return "XCoreISD::LADD";
  case XCoreISD::LSUB                 : return "XCoreISD::LSUB";
  case XCoreISD::LMUL                 : return "XCoreISD::LMUL";
  case XCoreISD::MACCU                : return "XCoreISD::MACCU";
  case XCoreISD::MACCS                : return "XCoreISD::MACCS";
  case XCoreISD::CRC8                 : return "XCoreISD::CRC8";
  case XCoreISD::BR_JT                : return "XCoreISD::BR_JT";
  case XCoreISD::BR_JT32              : return "XCoreISD::BR_JT32";
  case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
  case XCoreISD::EH_RETURN            : return "XCoreISD::EH_RETURN";
  case XCoreISD::MEMBARRIER           : return "XCoreISD::MEMBARRIER";
  }
  return nullptr;
}

XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM,
                                         const XCoreSubtarget &Subtarget)
    : TargetLowering(TM), TM(TM), Subtarget(Subtarget) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for the results of setcc operations (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC,     MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // 64-bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::ROTR,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);

  // Conversion of i64 -> double produces constant pool nodes.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD,  VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Expand);
  }

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD,  MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND,   MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,  MVT::Other, Expand);
  setOperationAction(ISD::VAARG,   MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // Exception handling
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  // Atomic operations
  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
  // As we are always Sequentially Consistent, an ATOMIC_FENCE becomes a no-op.
  setInsertFencesForAtomic(true);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_LOAD,  MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE,   MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  setMinFunctionAlignment(1);
  setPrefFunctionAlignment(2);
}

bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
    return true;
  }

  return false;
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_LOAD:        return LowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
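/// For XCore the only such case is i64 ADD/SUB, which ExpandADDSUB (below)
/// rebuilds from 32-bit LADD/LSUB carry chains.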
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>&Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
                                                     const GlobalValue *GV,
                                                     SelectionDAG &DAG) const {
  // FIXME there is no actual debug info here
  SDLoc dl(GA);

  if (GV->getType()->getElementType()->isFunctionTy())
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);

  const auto *GVar = dyn_cast<GlobalVariable>(GV);
  if ((GV->hasSection() && StringRef(GV->getSection()).startswith(".cp.")) ||
      (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);

  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

static bool IsSmallObject(const GlobalValue *GV,
                          const XCoreTargetLowering &XTL) {
  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
    return true;

  Type *ObjType = GV->getType()->getPointerElementType();
  if (!ObjType->isSized())
    return false;

  auto &DL = GV->getParent()->getDataLayout();
  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
  return ObjSize < CodeModelLargeSize && ObjSize != 0;
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc DL(GN);
  int64_t Offset = GN->getOffset();
  if (IsSmallObject(GV, *this)) {
    // We can only fold positive offsets that are a multiple of the word size.
    int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
    GA = getGlobalAddressWrapper(GA, GV, DAG);
    // Handle the rest of the offset.
    if (Offset != FoldedOffset) {
      SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, DL, MVT::i32);
      GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
    }
    return GA;
  } else {
    // Ideally we would not fold in offset with an index <= 11.
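    // Large objects are handled by materialising the address GV + Offset as a
    // constant-pool entry and loading it at runtime, presumably because such
    // objects may be out of range of the DP/CP-relative wrappers used above.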
    Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
    Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
    Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *Idx = ConstantInt::get(Ty, Offset);
    Constant *GAI = ConstantExpr::getGetElementPtr(
        Type::getInt8Ty(*DAG.getContext()), GA, Idx);
    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL,
                       DAG.getEntryNode(), CP, MachinePointerInfo(), false,
                       false, false, 0);
  }
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT);

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, PtrVT, Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment(), CP->getOffset());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment(), CP->getOffset());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, dl, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

SDValue XCoreTargetLowering::
lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
                                       int64_t Offset, SelectionDAG &DAG) const
{
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(PtrVT, DL, Chain, Base, MachinePointerInfo(), false,
                       false, false, 0);
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
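  // For example, with a word-aligned Base and Offset == 2 we get
  // LowOffset == 0 and HighOffset == 4, so the misaligned word is
  // (load(Base) >> 16) | (load(Base + 4) << 16).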
  int32_t HighOffset = RoundUpToAlignment(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, DL, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, DL, MVT::i32));
  }
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, DL, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, DL, MVT::i32);

  SDValue Low = DAG.getLoad(PtrVT, DL, Chain, LowAddr, MachinePointerInfo(),
                            false, false, false, 0);
  SDValue High = DAG.getLoad(PtrVT, DL, Chain, HighAddr, MachinePointerInfo(),
                             false, false, false, 0);
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}

static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  APInt KnownZero, KnownOne;
  DAG.computeKnownBits(Value, KnownZero, KnownOne);
  return KnownZero.countTrailingOnes() >= 2;
}

SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsMisalignedMemoryAccesses(LD->getMemoryVT(),
                                     LD->getAddressSpace(),
                                     LD->getAlignment()))
    return SDValue();

  auto &TD = DAG.getDataLayout();
  unsigned ABIAlignment = TD.getABITypeAlignment(
      LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        MinAlign(GV->getAlignment(), 4) == 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                 BasePtr, LD->getPointerInfo(), MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(),
                                 LD->isInvariant(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, DL, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                  HighAddr,
                                  LD->getPointerInfo().getWithOffset(2),
                                  MVT::i16, LD->isVolatile(),
                                  LD->isNonTemporal(), LD->isInvariant(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, DL, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = TD.getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain).setCallee(
      CallingConv::C, IntPtrTy,
      DAG.getExternalSymbol("__misaligned_load",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args), 0);

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
  return DAG.getMergeValues(Ops, DL);
}

SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
                                     ST->getAddressSpace(),
                                     ST->getAlignment())) {
    return SDValue();
  }
  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
      ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, dl, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getPointerInfo(), MVT::i16,
                                         ST->isVolatile(), ST->isNonTemporal(),
                                         2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, dl, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getPointerInfo().getWithOffset(2),
                                          MVT::i16, ST->isVolatile(),
                                          ST->isNonTemporal(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setCallee(
      CallingConv::C, Type::getVoidTy(*DAG.getContext()),
      DAG.getExternalSymbol("__misaligned_store",
                            getPointerTy(DAG.getDataLayout())),
      std::move(Args), 0);

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
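/// Commuted variants such as add(mul(x,y), add(a,b)) and
/// add(add(a, mul(x,y)), b) are recognised as well.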
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, dl, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, dl, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, dl, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, dl, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, dl, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD) {
    SDValue Result = TryExpandADDWithMul(N, DAG);
    if (Result.getNode())
      return Result;
  }

  SDLoc dl(N);

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(0, dl, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1),
                             DAG.getConstant(1, dl, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // Whilst LLVM does not support aggregate varargs, we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
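  // The lowering is the usual va_arg pointer bump: load the current va_list
  // pointer, advance it by the argument size, store it back, then load the
  // argument itself from the old pointer.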
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  SDValue VAList = DAG.getLoad(PtrVT, dl, InChain,
                               VAListPtr, MachinePointerInfo(SV),
                               false, false, false, 0);
  // Increment the pointer, VAList, to the next vararg.
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8,
                                                      dl));
  // Store the incremented VAList to the legalized pointer.
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
                         MachinePointerInfo(SV), false, false, 0);
  // Load the actual argument out of the pointer VAList.
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo(), false, false, 0);
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  // This node represents llvm.frameaddress on the DAG.
  // It takes one operand, the index of the frame address to return.
  // An index of zero corresponds to the current function's frame address.
  // An index of one to the parent's frame address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  // This node represents llvm.returnaddress on the DAG.
  // It takes one operand, the index of the return address to return.
  // An index of zero corresponds to the current function's return address.
  // An index of one to the parent's return address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  int FI = XFI->createLRSpillSlot(MF);
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  return DAG.getLoad(
      getPointerTy(DAG.getDataLayout()), SDLoc(Op), DAG.getEntryNode(), FIN,
      MachinePointerInfo::getFixedStack(MF, FI), false, false, false, 0);
}

SDValue XCoreTargetLowering::
LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
  // This node represents the offset from the frame pointer to the first
  // on-stack argument. It is needed for correct stack adjustment during
  // unwind. However, we don't know the offset until after the frame has
  // been finalised; this is done during the XCoreFTAOElim pass.
  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
  // This node represents the 'eh_return' GCC DWARF builtin, which is used to
  // return from an exception. The general meaning is: adjust the stack by
  // OFFSET and pass execution to HANDLER.
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain   = Op.getOperand(0);
  SDValue Offset  = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Absolute SP = (FP + FrameToArgs) + Offset
  const TargetRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                     RegInfo->getFrameRegister(MF), MVT::i32);
  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
                                    MVT::i32);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);

  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
  // which leaves 2 caller saved registers, R2 & R3 for us to use.
  unsigned StackReg = XCore::R2;
  unsigned HandlerReg = XCore::R3;

  SDValue OutChains[] = {
    DAG.getCopyToReg(Chain, dl, StackReg, Stack),
    DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
  };

  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);

  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StackReg, MVT::i32),
                     DAG.getRegister(HandlerReg, MVT::i32));
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  //    LDAPF_u10 r11, nest
  //    LDW_2rus r11, r11[0]
  //    STWSP_ru6 r11, sp[0]
  //    LDAPF_u10 r11, fptr
  //    LDW_2rus r11, r11[0]
  //    BAU_1r r11
  // nest:
  //    .word nest
  // fptr:
  //    .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  OutChains[0] = DAG.getStore(Chain, dl,
                              DAG.getConstant(0x0a3cd805, dl, MVT::i32), Addr,
                              MachinePointerInfo(TrmpAddr), false, false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, dl, MVT::i32));
  OutChains[1] = DAG.getStore(Chain, dl,
                              DAG.getConstant(0xd80456c0, dl, MVT::i32), Addr,
                              MachinePointerInfo(TrmpAddr, 4), false, false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, dl, MVT::i32));
  OutChains[2] = DAG.getStore(Chain, dl,
                              DAG.getConstant(0x27fb0a3c, dl, MVT::i32), Addr,
                              MachinePointerInfo(TrmpAddr, 8), false, false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, dl, MVT::i32));
  OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
                              MachinePointerInfo(TrmpAddr, 12), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, dl, MVT::i32));
  OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
                              MachinePointerInfo(TrmpAddr, 16), false, false,
                              0);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  case Intrinsic::xcore_crc8:
    EVT VT = Op.getValueType();
    SDValue Data =
      DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    SDValue Crc(Data.getNode(), 1);
    SDValue Results[] = { Crc, Data };
    return DAG.getMergeValues(Results, DL);
  }
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
  assert(N->getOrdering() <= Monotonic &&
         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic load must be aligned");
    return DAG.getLoad(getPointerTy(DAG.getDataLayout()), SDLoc(Op),
                       N->getChain(), N->getBasePtr(), N->getPointerInfo(),
                       N->isVolatile(), N->isNonTemporal(), N->isInvariant(),
                       N->getAlignment(), N->getAAInfo(), N->getRanges());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic load must be aligned");
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                          N->isVolatile(), N->isNonTemporal(),
                          N->isInvariant(), N->getAlignment(), N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                          N->isVolatile(), N->isNonTemporal(),
                          N->isInvariant(), N->getAlignment(), N->getAAInfo());
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
  assert(N->getOrdering() <= Monotonic &&
         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic store must be aligned");
    return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(),
                        N->getBasePtr(), N->getPointerInfo(),
                        N->isVolatile(), N->isNonTemporal(),
                        N->getAlignment(), N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic store must be aligned");
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                             N->isVolatile(), N->isNonTemporal(),
                             N->getAlignment(), N->getAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                             N->isVolatile(), N->isNonTemporal(),
                             N->getAlignment(), N->getAAInfo());
  return SDValue();
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool isVarArg                         = CLI.IsVarArg;

  // The XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C is implemented.
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers / memory locations.
static SDValue
LowerCallResult(SDValue Chain, SDValue InFlag,
                const SmallVectorImpl<CCValAssign> &RVLocs,
                SDLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    const CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc()) {
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
                                 InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    } else {
      assert(VA.isMemLoc());
      ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
                                             InVals.size()));
      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int offset = ResultMemLocs[i].first;
    unsigned index = ResultMemLocs[i].second;
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
    SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, dl, MVT::i32) };
    SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
    InVals[index] = load;
    MemOpChains.push_back(load.getValue(1));
  }

  // Transform all load nodes into one single node because
  // all load nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
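/// In outline, the code below emits: CALLSEQ_START, STWSP nodes for stack
/// arguments, CopyToReg nodes for register arguments, the BL call node,
/// CALLSEQ_END, and finally LowerCallResult to collect the return values.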
SDValue
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, dl, PtrVT, true), dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, dl,
                                                        MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, dl, PtrVT, true),
                             DAG.getConstant(0, dl, PtrVT, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
}

//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
}

/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          SDLoc dl,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals)
                                            const {
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg>
                                         &Ins,
                                       SDLoc dl,
                                       SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  if (!isVarArg)
    XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getSimpleVT().SimpleTy << "\n";
#endif
          llvm_unreachable(nullptr);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // sanity check
      assert(VA.isMemLoc());
      // Load the argument to a virtual register.
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(MF, FI), false,
                          false, false, 0);
    }
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
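  // Unallocated argument registers (R0-R3) are spilled below to consecutive
  // fixed stack slots, highest register at the highest address, so the
  // varargs end up contiguous with any arguments the caller passed on the
  // stack.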
  if (isVarArg) {
    // Argument registers
    static const MCPhysReg ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs);
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address.
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI->CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        // Move argument from virt reg -> stack
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                     MachinePointerInfo(), false, false, 0);
        MemOps.push_back(Store);
      }
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
          MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                                 true));
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
                                                    ArgDE = ArgData.end();
       ArgDI != ArgDE; ++ArgDI) {
    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
      unsigned Size = ArgDI->Flags.getByValSize();
      unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI->CreateStackObject(Size, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
                                     DAG.getConstant(Size, dl, MVT::i32),
                                     Align, false, false, false,
                                     MachinePointerInfo(),
                                     MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI->SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
    return false;
  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
    return false;
  return true;
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc dl, SelectionDAG &DAG) const {

  XCoreFunctionInfo *XFI =
    DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represent the assignment of
  // the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  if (!isVarArg)
    CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);

  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0".
  RetOps.push_back(DAG.getConstant(0, dl, MVT::i32));

  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (isVarArg) {
      report_fatal_error("Can't return value from vararg function in memory");
    }

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
    // Create the frame index object for the memory location.
    int FI = MFI->CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, OutVals[i], FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false,
        false, 0));
  }

  // Transform all store nodes into one single node because
  // all stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together: glueing each
    // copy to the next prevents them from being scheduled apart.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges.
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl,
          TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
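
  // Illustrative summary of the expansion (virtual register names are
  // hypothetical): a pseudo
  //   %res = SELECT_CC %cond, %tval, %fval
  // has now become
  //   thisMBB:  BRFT_lru6 %cond, sinkMBB      ; fallthrough to copy0MBB
  //   copy0MBB:                               ; fallthrough to sinkMBB
  //   sinkMBB:  %res = PHI [ %fval, copy0MBB ], [ %tval, thisMBB ]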
  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
//  Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::INTRINSIC_VOID:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::xcore_outt:
    case Intrinsic::xcore_outct:
    case Intrinsic::xcore_chkct: {
      SDValue OutVal = N->getOperand(3);
      // These instructions ignore the high bits.
      if (OutVal.hasOneUse()) {
        unsigned BitWidth = OutVal.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
        APInt KnownZero, KnownOne;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLO.ShrinkDemandedConstant(OutVal, DemandedMask) ||
            TLI.SimplifyDemandedBits(OutVal, DemandedMask, KnownZero, KnownOne,
                                     TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    case Intrinsic::xcore_setpt: {
      SDValue Time = N->getOperand(3);
      // This instruction ignores the high bits.
      if (Time.hasOneUse()) {
        unsigned BitWidth = Time.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
        APInt KnownZero, KnownOne;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLO.ShrinkDemandedConstant(Time, DemandedMask) ||
            TLI.SimplifyDemandedBits(Time, DemandedMask, KnownZero, KnownOne,
                                     TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    }
    break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // Canonicalize constant to RHS.
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, dl, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, dl, VT));
      SDValue Ops[] = { Result, Carry };
      return DAG.getMergeValues(Ops, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff the carry is unused and y has
    // only the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.computeKnownBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Carry };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
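
  // Note (an interpretation, not part of the original code): LADD consumes
  // only the low bit of its carry-in operand, so ladd(x, 0, y) computes
  // x + (y & 1).  When y is known to be 0 or 1, typically the carry out of
  // an earlier LADD, this is exactly x + y, which is why the fold above is
  // safe once the carry output is unused.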
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.computeKnownBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, dl, VT), N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff the borrow is unused and y has
    // only the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.computeKnownBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, dl, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize a multiplicative constant to the RHS.  If both
    // multiplicative operands are constant, canonicalize the smaller one
    // to the RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused, fold to add(a, b).
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, dl);
      }
      // Otherwise fold to ladd(a, b, 0).
      SDValue Result =
        DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
      SDValue Carry(Result.getNode(), 1);
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, dl);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b).  The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
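
    // Note (illustrative, not part of the original code): the fold above
    // turns, e.g., (x * y) + a + b into a single lmul node matching the
    // XCore long-multiply instruction; result 1 of that node is the low
    // 32 bits of x*y + a + b, and the ignored result 0 is the high word.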
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended.  We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, dl, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, dl, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace an unaligned store of an unaligned load with a memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsMisalignedMemoryAccesses(ST->getMemoryVT(),
                                       ST->getAddressSpace(),
                                       ST->getAlignment()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    assert((StoreBits % 8) == 0 &&
           "Store size in bits must be a multiple of 8");
    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        bool isTail = isInTailCallPosition(DAG, ST, Chain);
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, dl, MVT::i32),
                              Alignment, false, isTail, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}
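
// Note (illustrative, not part of the original code): the known-bits
// information reported below lets the generic DAG combiner remove redundant
// masking.  For example, because the carry result of LADD is known to be
// 0 or 1, an expression such as (carry & 1) simplifies to just the carry.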
void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        APInt &KnownZero,
                                                        APInt &KnownOne,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 1) {
      // Top bits of carry / borrow are clear.
      KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                        KnownZero.getBitWidth() - 1);
    }
    break;
  case ISD::INTRINSIC_W_CHAIN:
    {
      unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      switch (IntNo) {
      case Intrinsic::xcore_getts:
        // High bits are known to be zero.
        KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                          KnownZero.getBitWidth() - 16);
        break;
      case Intrinsic::xcore_int:
      case Intrinsic::xcore_inct:
        // High bits are known to be zero.
        KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                          KnownZero.getBitWidth() - 8);
        break;
      case Intrinsic::xcore_testct:
        // Result is either 0 or 1.
        KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                          KnownZero.getBitWidth() - 1);
        break;
      case Intrinsic::xcore_testwct:
        // Result is in the range 0 - 4.
        KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                          KnownZero.getBitWidth() - 3);
        break;
      }
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val % 2 == 0 && isImmUs(val / 2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val % 4 == 0 && isImmUs(val / 4));
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool XCoreTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  unsigned Size = DL.getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
           AM.BaseOffs % 4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}

//===----------------------------------------------------------------------===//
//                           XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
XCoreTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
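
// Illustrative usage (not part of the original code): with the hook above,
// an inline-asm operand constrained by 'r', e.g.
//   asm("add %0, %1, %2" : "=r"(res) : "r"(a), "r"(b));
// is allocated from GRRegs; any other constraint falls through to the
// generic TargetLowering handling.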