//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "xcore-lower"

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch (Opcode)
  {
    case XCoreISD::BL                   : return "XCoreISD::BL";
    case XCoreISD::PCRelativeWrapper    : return "XCoreISD::PCRelativeWrapper";
    case XCoreISD::DPRelativeWrapper    : return "XCoreISD::DPRelativeWrapper";
    case XCoreISD::CPRelativeWrapper    : return "XCoreISD::CPRelativeWrapper";
    case XCoreISD::LDWSP                : return "XCoreISD::LDWSP";
    case XCoreISD::STWSP                : return "XCoreISD::STWSP";
    case XCoreISD::RETSP                : return "XCoreISD::RETSP";
    case XCoreISD::LADD                 : return "XCoreISD::LADD";
    case XCoreISD::LSUB                 : return "XCoreISD::LSUB";
    case XCoreISD::LMUL                 : return "XCoreISD::LMUL";
    case XCoreISD::MACCU                : return "XCoreISD::MACCU";
    case XCoreISD::MACCS                : return "XCoreISD::MACCS";
    case XCoreISD::CRC8                 : return "XCoreISD::CRC8";
    case XCoreISD::BR_JT                : return "XCoreISD::BR_JT";
    case XCoreISD::BR_JT32              : return "XCoreISD::BR_JT32";
    case XCoreISD::FRAME_TO_ARGS_OFFSET : return "XCoreISD::FRAME_TO_ARGS_OFFSET";
    case XCoreISD::EH_RETURN            : return "XCoreISD::EH_RETURN";
    case XCoreISD::MEMBARRIER           : return "XCoreISD::MEMBARRIER";
    default                             : return nullptr;
  }
}

XCoreTargetLowering::XCoreTargetLowering(const TargetMachine &TM)
  : TargetLowering(TM, new XCoreTargetObjectFile()), TM(TM),
    Subtarget(TM.getSubtarget<XCoreSubtarget>()) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Division is expensive
  setIntDivIsCheap(false);

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
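  // (Expand legalizes each of these into a sequence of operations the target
  // does support; Custom routes the node through LowerOperation below.)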
  setOperationAction(ISD::BR_CC,     MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::ADDC,      MVT::i32, Expand);
  setOperationAction(ISD::ADDE,      MVT::i32, Expand);
  setOperationAction(ISD::SUBC,      MVT::i32, Expand);
  setOperationAction(ISD::SUBE,      MVT::i32, Expand);

  // 64bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // Exception handling
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  setExceptionPointerRegister(XCore::R0);
  setExceptionSelectorRegister(XCore::R1);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);

  // Atomic operations
  // We request a fence for ATOMIC_* instructions, to reduce them to Monotonic.
  // As we are always Sequential Consistent, an ATOMIC_FENCE becomes a no-op.
  setInsertFencesForAtomic(true);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  setMinFunctionAlignment(1);
  setPrefFunctionAlignment(2);
}

bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
    return true;
  }

  return false;
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET: return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
  case ISD::ATOMIC_LOAD:        return LowerATOMIC_LOAD(Op, DAG);
  case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
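/// For the XCore this is currently only used for 64-bit ADD and SUB, which
/// are expanded via ExpandADDSUB.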
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::getGlobalAddressWrapper(SDValue GA,
                                                     const GlobalValue *GV,
                                                     SelectionDAG &DAG) const {
  // FIXME there is no actual debug info here
  SDLoc dl(GA);

  if (GV->getType()->getElementType()->isFunctionTy())
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);

  const auto *GVar = dyn_cast<GlobalVariable>(GV);
  if ((GV->hasSection() && StringRef(GV->getSection()).startswith(".cp.")) ||
      (GVar && GVar->isConstant() && GV->hasLocalLinkage()))
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);

  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

static bool IsSmallObject(const GlobalValue *GV,
                          const XCoreTargetLowering &XTL) {
  if (XTL.getTargetMachine().getCodeModel() == CodeModel::Small)
    return true;

  Type *ObjType = GV->getType()->getPointerElementType();
  if (!ObjType->isSized())
    return false;

  unsigned ObjSize = XTL.getDataLayout()->getTypeAllocSize(ObjType);
  return ObjSize < CodeModelLargeSize && ObjSize != 0;
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  SDLoc DL(GN);
  int64_t Offset = GN->getOffset();
  if (IsSmallObject(GV, *this)) {
    // We can only fold positive offsets that are a multiple of the word size.
    int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
    SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
    GA = getGlobalAddressWrapper(GA, GV, DAG);
    // Handle the rest of the offset.
    if (Offset != FoldedOffset) {
      SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, MVT::i32);
      GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
    }
    return GA;
  } else {
    // Ideally we would not fold in offset with an index <= 11.
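    // Large objects are accessed indirectly: build a constant-pool entry
    // holding the (possibly offset) address and load the pointer from it.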
    Type *Ty = Type::getInt8PtrTy(*DAG.getContext());
    Constant *GA = ConstantExpr::getBitCast(const_cast<GlobalValue*>(GV), Ty);
    Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *Idx = ConstantInt::get(Ty, Offset);
    Constant *GAI = ConstantExpr::getGetElementPtr(GA, Idx);
    SDValue CP = DAG.getConstantPool(GAI, MVT::i32);
    return DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), CP,
                       MachinePointerInfo(), false, false, false, 0);
  }
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy());

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(CP);
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment(), CP->getOffset());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment(), CP->getOffset());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

SDValue XCoreTargetLowering::
lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
                                       int64_t Offset, SelectionDAG &DAG) const
{
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(getPointerTy(), DL, Chain, Base, MachinePointerInfo(),
                       false, false, false, 0);
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
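  // For example, with Offset == 2 this loads the words at offsets 0 and 4 and
  // recombines them as (Low >> 16) | (High << 16).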
  int32_t HighOffset = RoundUpToAlignment(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, MVT::i32));
  }
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, MVT::i32);

  SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
                            LowAddr, MachinePointerInfo(),
                            false, false, false, 0);
  SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
                             HighAddr, MachinePointerInfo(),
                             false, false, false, 0);
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, DL);
}

static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  APInt KnownZero, KnownOne;
  DAG.computeKnownBits(Value, KnownZero, KnownOne);
  return KnownZero.countTrailingOnes() >= 2;
}

SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
    return SDValue();

  unsigned ABIAlignment = getDataLayout()->
    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        MinAlign(GV->getAlignment(), 4) == 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                 BasePtr, LD->getPointerInfo(), MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                  HighAddr,
                                  LD->getPointerInfo().getWithOffset(2),
                                  MVT::i16, LD->isVolatile(),
                                  LD->isNonTemporal(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL).setChain(Chain)
    .setCallee(CallingConv::C, IntPtrTy,
               DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
               std::move(Args), 0);

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDValue Ops[] = { CallResult.first, CallResult.second };
  return DAG.getMergeValues(Ops, DL);
}

SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
    return SDValue();
  }
  unsigned ABIAlignment = getDataLayout()->
    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getPointerInfo(), MVT::i16,
                                         ST->isVolatile(), ST->isNonTemporal(),
                                         2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getPointerInfo().getWithOffset(2),
                                          MVT::i16, ST->isVolatile(),
                                          ST->isNonTemporal(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain)
    .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()),
               DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
               std::move(Args), 0);

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
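/// For example, add(add(mul(x,y),a),b) yields Mul0 = x, Mul1 = y,
/// Addend0 = a and Addend1 = b.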
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
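    // A single signed multiply-accumulate (MACCS) then yields the full 64-bit
    // product plus the 64-bit addend.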
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD) {
    SDValue Result = TryExpandADDWithMul(N, DAG);
    if (Result.getNode())
      return Result;
  }

  SDLoc dl(N);

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(0, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(1, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(0, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(1, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // Whilst llvm does not support aggregate varargs we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
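  // The lowering loads the current va_list pointer, advances it past this
  // argument, stores it back, and then loads the argument itself.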
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  SDValue VAList = DAG.getLoad(PtrVT, dl, InChain,
                               VAListPtr, MachinePointerInfo(SV),
                               false, false, false, 0);
  // Increment the pointer, VAList, to the next vararg
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8));
  // Store the incremented VAList to the legalized pointer
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
                         MachinePointerInfo(SV), false, false, 0);
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo(), false, false, 0);
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  // This node represents llvm.frameaddress on the DAG.
  // It takes one operand, the index of the frame address to return.
  // An index of zero corresponds to the current function's frame address.
  // An index of one to the parent's frame address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op),
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  // This node represents llvm.returnaddress on the DAG.
  // It takes one operand, the index of the return address to return.
  // An index of zero corresponds to the current function's return address.
  // An index of one to the parent's return address, and so on.
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  int FI = XFI->createLRSpillSlot(MF);
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  return DAG.getLoad(getPointerTy(), SDLoc(Op), DAG.getEntryNode(), FIN,
                     MachinePointerInfo::getFixedStack(FI), false, false,
                     false, 0);
}

SDValue XCoreTargetLowering::
LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {
  // This node represents the offset from the frame pointer to the first
  // on-stack argument. This is needed for correct stack adjustment during
  // unwind. However, we don't know the offset until after the frame has been
  // finalised. This is done during the XCoreFTAOElim pass.
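  // (That pass replaces this node with the actual offset once the frame is
  // finalised.)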
  return DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, SDLoc(Op), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER)
  // This node represents the 'eh_return' GCC DWARF builtin, which is used to
  // return from an exception. The general meaning is: adjust the stack by
  // OFFSET and pass execution to HANDLER.
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Absolute SP = (FP + FrameToArgs) + Offset
  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
  SDValue Stack = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                     RegInfo->getFrameRegister(MF), MVT::i32);
  SDValue FrameToArgs = DAG.getNode(XCoreISD::FRAME_TO_ARGS_OFFSET, dl,
                                    MVT::i32);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, FrameToArgs);
  Stack = DAG.getNode(ISD::ADD, dl, MVT::i32, Stack, Offset);

  // R0=ExceptionPointerRegister R1=ExceptionSelectorRegister
  // which leaves two caller-saved registers, R2 and R3, for us to use.
  unsigned StackReg = XCore::R2;
  unsigned HandlerReg = XCore::R3;

  SDValue OutChains[] = {
    DAG.getCopyToReg(Chain, dl, StackReg, Stack),
    DAG.getCopyToReg(Chain, dl, HandlerReg, Handler)
  };

  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);

  return DAG.getNode(XCoreISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StackReg, MVT::i32),
                     DAG.getRegister(HandlerReg, MVT::i32));
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  //    LDAPF_u10 r11, nest
  //    LDW_2rus r11, r11[0]
  //    STWSP_ru6 r11, sp[0]
  //    LDAPF_u10 r11, fptr
  //    LDW_2rus r11, r11[0]
  //    BAU_1r r11
  // nest:
  //    .word nest
  // fptr:
  //    .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, MVT::i32));
  OutChains[1] = DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 4), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, MVT::i32));
  OutChains[2] = DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 8), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, MVT::i32));
  OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
                              MachinePointerInfo(TrmpAddr, 12), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, MVT::i32));
  OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
                              MachinePointerInfo(TrmpAddr, 16), false, false,
                              0);
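
  // Glue the five stores together so the trampoline is fully written before
  // it can be entered.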
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
    case Intrinsic::xcore_crc8:
      EVT VT = Op.getValueType();
      SDValue Data =
        DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                    Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
      SDValue Crc(Data.getNode(), 1);
      SDValue Results[] = { Crc, Data };
      return DAG.getMergeValues(Results, DL);
  }
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(XCoreISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
}

SDValue XCoreTargetLowering::
LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
  assert(N->getOrdering() <= Monotonic &&
         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic load must be aligned");
    return DAG.getLoad(getPointerTy(), SDLoc(Op), N->getChain(),
                       N->getBasePtr(), N->getPointerInfo(),
                       N->isVolatile(), N->isNonTemporal(),
                       N->isInvariant(), N->getAlignment(),
                       N->getTBAAInfo(), N->getRanges());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic load must be aligned");
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                          N->isVolatile(), N->isNonTemporal(),
                          N->getAlignment(), N->getTBAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), MVT::i32, N->getChain(),
                          N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                          N->isVolatile(), N->isNonTemporal(),
                          N->getAlignment(), N->getTBAAInfo());
  return SDValue();
}

SDValue XCoreTargetLowering::
LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *N = cast<AtomicSDNode>(Op);
  assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
  assert(N->getOrdering() <= Monotonic &&
         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
  if (N->getMemoryVT() == MVT::i32) {
    if (N->getAlignment() < 4)
      report_fatal_error("atomic store must be aligned");
    return DAG.getStore(N->getChain(), SDLoc(Op), N->getVal(),
                        N->getBasePtr(), N->getPointerInfo(),
                        N->isVolatile(), N->isNonTemporal(),
                        N->getAlignment(), N->getTBAAInfo());
  }
  if (N->getMemoryVT() == MVT::i16) {
    if (N->getAlignment() < 2)
      report_fatal_error("atomic store must be aligned");
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i16,
                             N->isVolatile(), N->isNonTemporal(),
                             N->getAlignment(), N->getTBAAInfo());
  }
  if (N->getMemoryVT() == MVT::i8)
    return DAG.getTruncStore(N->getChain(), SDLoc(Op), N->getVal(),
                             N->getBasePtr(), N->getPointerInfo(), MVT::i8,
                             N->isVolatile(), N->isNonTemporal(),
                             N->getAlignment(), N->getTBAAInfo());
  return SDValue();
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C implemented
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers / memory locations.
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                               const SmallVectorImpl<CCValAssign> &RVLocs,
                               SDLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  SmallVector<std::pair<int, unsigned>, 4> ResultMemLocs;
  // Copy results out of physical registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    const CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc()) {
      Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getValVT(),
                                 InFlag).getValue(1);
      InFlag = Chain.getValue(2);
      InVals.push_back(Chain.getValue(0));
    } else {
      assert(VA.isMemLoc());
      ResultMemLocs.push_back(std::make_pair(VA.getLocMemOffset(),
                                             InVals.size()));
      // Reserve space for this result.
      InVals.push_back(SDValue());
    }
  }

  // Copy results out of memory.
  SmallVector<SDValue, 4> MemOpChains;
  for (unsigned i = 0, e = ResultMemLocs.size(); i != e; ++i) {
    int offset = ResultMemLocs[i].first;
    unsigned index = ResultMemLocs[i].second;
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
    SDValue Ops[] = { Chain, DAG.getConstant(offset / 4, MVT::i32) };
    SDValue load = DAG.getNode(XCoreISD::LDWSP, dl, VTs, Ops);
    InVals[index] = load;
    MemOpChains.push_back(load.getValue(1));
  }

  // Transform all load nodes into one single node because
  // all load nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  return Chain;
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
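/// Outgoing stack arguments are written with STWSP (sp-relative stores) and
/// stack results are read back with LDWSP (see LowerCallResult above).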
SDValue
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  SmallVector<CCValAssign, 16> RVLocs;
  // Analyze return values to determine the number of bytes of stack required.
  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext());
  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = RetCCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getConstant(NumBytes, getPointerTy(), true),
                               dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    }

    // Arguments that can be passed in registers are kept in the RegsToPass
    // vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //                 = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy(), true),
                             DAG.getConstant(0, getPointerTy(), true),
                             InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, RVLocs, dl, DAG, InVals);
}

//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
}

/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          SDLoc dl,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals)
                                            const {
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg> &Ins,
                                       SDLoc dl,
                                       SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();

  // Assign locations to all of the incoming arguments.
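  // (Under CC_XCore the first four words of arguments travel in R0-R3;
  // anything further is passed on the stack.)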
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  if (!isVarArg)
    XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getSimpleVT().SimpleTy << "\n";
#endif
          llvm_unreachable(nullptr);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // sanity check
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(FI),
                          false, false, false, 0);
    }
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
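  // Any argument registers not claimed by named arguments are spilled to
  // fixed stack slots below, so va_start can find all the variadic words
  // contiguously in memory.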
  if (isVarArg) {
    // Argument registers
    static const MCPhysReg ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                     array_lengthof(ArgRegs));
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI->CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        // Move argument from virt reg -> stack
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                     MachinePointerInfo(), false, false, 0);
        MemOps.push_back(Store);
      }
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                               true));
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, CFRegNode);

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // pointer.
  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
                                                    ArgDE = ArgData.end();
       ArgDI != ArgDE; ++ArgDI) {
    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
      unsigned Size = ArgDI->Flags.getByValSize();
      unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI->CreateStackObject(Size, Align, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
                                     DAG.getConstant(Size, MVT::i32),
                                     Align, false, false,
                                     MachinePointerInfo(),
                                     MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI->SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
  if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
    return false;
  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
    return false;
  return true;
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc dl, SelectionDAG &DAG) const {

  XCoreFunctionInfo *XFI =
    DAG.getMachineFunction().getInfo<XCoreFunctionInfo>();
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze return values.
  if (!isVarArg)
    CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);

  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0"
  RetOps.push_back(DAG.getConstant(0, MVT::i32));

  SmallVector<SDValue, 4> MemOpChains;
  // Handle return values that must be copied to memory.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (VA.isRegLoc())
      continue;
    assert(VA.isMemLoc());
    if (isVarArg) {
      report_fatal_error("Can't return value from vararg function in memory");
    }

    int Offset = VA.getLocMemOffset();
    unsigned ObjSize = VA.getLocVT().getSizeInBits() / 8;
    // Create the frame index object for the memory location.
    int FI = MFI->CreateFixedObject(ObjSize, Offset, false);

    // Create a SelectionDAG node corresponding to a store
    // to this memory location.
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    MemOpChains.push_back(DAG.getStore(Chain, dl, OutVals[i], FIN,
                                       MachinePointerInfo::getFixedStack(FI),
                                       false, false, 0));
  }

  // Transform all store nodes into one single node because
  // all stores are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
  // Now handle return values copied to registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    if (!VA.isRegLoc())
      continue;
    // Copy the result values into the output registers.
    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together by gluing each
    // copy to the previous one, so nothing can be scheduled between them.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);
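  // The SELECT_CC pseudo's operands are: 0 = result vreg, 1 = condition,
  // 2 = value if the condition is true, 3 = value if it is false.  BRFT
  // ("branch if true") jumps straight to sinkMBB on a true condition, so
  // thisMBB feeds the true value into the PHI below while copy0MBB, the
  // fallthrough block, feeds the false value.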
  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl,
          TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
//  Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::INTRINSIC_VOID:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::xcore_outt:
    case Intrinsic::xcore_outct:
    case Intrinsic::xcore_chkct: {
      SDValue OutVal = N->getOperand(3);
      // These instructions ignore the high bits.
      if (OutVal.hasOneUse()) {
        unsigned BitWidth = OutVal.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
        APInt KnownZero, KnownOne;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLO.ShrinkDemandedConstant(OutVal, DemandedMask) ||
            TLI.SimplifyDemandedBits(OutVal, DemandedMask, KnownZero, KnownOne,
                                     TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    case Intrinsic::xcore_setpt: {
      SDValue Time = N->getOperand(3);
      // This instruction ignores the high bits.
      if (Time.hasOneUse()) {
        unsigned BitWidth = Time.getValueSizeInBits();
        APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
        APInt KnownZero, KnownOne;
        TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                              !DCI.isBeforeLegalizeOps());
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (TLO.ShrinkDemandedConstant(Time, DemandedMask) ||
            TLI.SimplifyDemandedBits(Time, DemandedMask, KnownZero, KnownOne,
                                     TLO))
          DCI.CommitTargetLoweringOpt(TLO);
      }
      break;
    }
    }
    break;
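  // The cases below fold the XCore long-arithmetic nodes.  LADD and LSUB
  // take (a, b, carry-in) and produce a (sum/difference, carry/borrow)
  // value pair; most of the folds hinge on proving, via known bits, that
  // the carry-in is already confined to the low bit.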
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // Canonicalize constant to RHS.
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0,
                         N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, VT));
      SDValue Ops[] = { Result, Carry };
      return DAG.getMergeValues(Ops, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff the carry is unused and no bit
    // of y other than the low bit can be set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.computeKnownBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Carry };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff no bit of x other than the low bit
    // can be set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.computeKnownBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, VT), N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff the borrow is unused and no bit
    // of y other than the low bit can be set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.computeKnownBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS.  If both multiplicative
    // operands are constant, canonicalize the smallest to the RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b).
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, dl);
      }
      // Otherwise fold to ladd(a, b, 0).
      SDValue Result =
        DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
      SDValue Carry(Result.getNode(), 1);
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, dl);
    }
  }
  break;
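  // As the folds above show, LMUL computes the 64-bit value x*y + a + b and
  // returns it as a (hi, lo) pair.  The ISD::ADD combines below run this in
  // reverse, recognising add(add(mul(x,y),a),b) trees and rewriting them as
  // a single LMUL.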
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b).  The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended.  We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    if (StoreBits % 8) {
      break;
    }
    unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, MVT::i32),
                              Alignment, false, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}

void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        APInt &KnownZero,
                                                        APInt &KnownOne,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 1) {
      // Top bits of carry / borrow are clear.
      KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                        KnownZero.getBitWidth() - 1);
    }
    break;
  case ISD::INTRINSIC_W_CHAIN:
    {
      unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
      switch (IntNo) {
      case Intrinsic::xcore_getts:
        // High bits are known to be zero.
        KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                          KnownZero.getBitWidth() - 16);
        break;
      case Intrinsic::xcore_int:
      case Intrinsic::xcore_inct:
        // High bits are known to be zero.
        KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                          KnownZero.getBitWidth() - 8);
        break;
      case Intrinsic::xcore_testct:
        // Result is either 0 or 1.
        KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                          KnownZero.getBitWidth() - 1);
        break;
      case Intrinsic::xcore_testwct:
        // Result is in the range 0 - 4.
        KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                          KnownZero.getBitWidth() - 3);
        break;
      }
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//
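// The predicates below describe the unsigned, scaled immediate offsets that
// XCore load/store addressing modes accept: isImmUs matches 0..11, so the
// half-word-scaled isImmUs2 matches {0, 2, ..., 22} and the word-scaled
// isImmUs4 matches {0, 4, ..., 44}.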
static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val%2 == 0 && isImmUs(val/2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val%4 == 0 && isImmUs(val/4));
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           Type *Ty) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  const DataLayout *TD = TM.getDataLayout();
  unsigned Size = TD->getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
           AM.BaseOffs%4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}

//===----------------------------------------------------------------------===//
//  XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass*>
XCoreTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}