//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "xcore-lower"

#include "XCoreISelLowering.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCore.h"
#include "XCoreTargetObjectFile.h"
#include "XCoreTargetMachine.h"
#include "XCoreSubtarget.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/VectorExtras.h"
using namespace llvm;

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch (Opcode)
  {
  case XCoreISD::BL                : return "XCoreISD::BL";
  case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
  case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
  case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
  case XCoreISD::STWSP             : return "XCoreISD::STWSP";
  case XCoreISD::RETSP             : return "XCoreISD::RETSP";
  case XCoreISD::LADD              : return "XCoreISD::LADD";
  case XCoreISD::LSUB              : return "XCoreISD::LSUB";
  case XCoreISD::LMUL              : return "XCoreISD::LMUL";
  case XCoreISD::MACCU             : return "XCoreISD::MACCU";
  case XCoreISD::MACCS             : return "XCoreISD::MACCS";
  case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
  case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
  default                          : return NULL;
  }
}

XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
  : TargetLowering(XTM, new XCoreTargetObjectFile()),
    TM(XTM),
    Subtarget(*XTM.getSubtargetImpl()) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, XCore::GRRegsRegisterClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Division is expensive
  setIntDivIsCheap(false);

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::RegPressure);

  // Use i32 for setcc operation results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
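  // (Operations marked Expand here are broken up by the generic legalizer;
  // the ones marked Custom are handled in LowerOperation below.)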
  setOperationAction(ISD::BR_CC,     MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32,   Custom);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // Stop the combiner from recombining select and set_cc.
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  // 64-bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Thread Local Storage
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constant pool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // TRAMPOLINE is custom lowered.
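  // (See LowerINIT_TRAMPOLINE / LowerADJUST_TRAMPOLINE below.)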
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  maxStoresPerMemset = maxStoresPerMemsetOptSize = 4;
  maxStoresPerMemmove = maxStoresPerMemmoveOptSize
    = maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);

  setMinFunctionAlignment(1);
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::GlobalAddress:     return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:  return LowerGlobalTLSAddress(Op, DAG);
  case ISD::BlockAddress:      return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:      return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:             return LowerBR_JT(Op, DAG);
  case ISD::LOAD:              return LowerLOAD(Op, DAG);
  case ISD::STORE:             return LowerSTORE(Op, DAG);
  case ISD::SELECT_CC:         return LowerSELECT_CC(Op, DAG);
  case ISD::VAARG:             return LowerVAARG(Op, DAG);
  case ISD::VASTART:           return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:         return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:         return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:               return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:         return LowerFRAMEADDR(Op, DAG);
  case ISD::INIT_TRAMPOLINE:   return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
    return SDValue();
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
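/// For XCore the only such nodes are the i64 ADD/SUB results produced by
/// ExpandADDSUB below.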
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
    return;
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc dl = Op.getDebugLoc();
  SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
                             Op.getOperand(3), Op.getOperand(4));
  return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
                     Op.getOperand(1));
}

SDValue XCoreTargetLowering::
getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
                        SelectionDAG &DAG) const
{
  // FIXME there is no actual debug info here
  DebugLoc dl = GA.getDebugLoc();
  if (isa<Function>(GV)) {
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
  }
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee to determine constness
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
  }
  bool isConst = GVar && GVar->isConstant();
  if (isConst) {
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
  }
  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), MVT::i32);
  return getGlobalAddressWrapper(GA, GV, DAG);
}

static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) {
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
                     DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
}

static inline bool isZeroLengthArray(Type *Ty) {
  ArrayType *AT = dyn_cast_or_null<ArrayType>(Ty);
  return AT && (AT->getNumElements() == 0);
}

SDValue XCoreTargetLowering::
LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
{
  // FIXME there isn't really debug info here
  DebugLoc dl = Op.getDebugLoc();
  // transform to label + getid() * size
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32);
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee to determine size
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
  }
  if (!GVar) {
    llvm_unreachable("Thread local object not a GlobalVariable?");
    return SDValue();
  }
  Type *Ty = cast<PointerType>(GV->getType())->getElementType();
  if (!Ty->isSized() || isZeroLengthArray(Ty)) {
#ifndef NDEBUG
    errs() << "Size of thread local object " << GVar->getName()
           << " is unknown\n";
#endif
    llvm_unreachable(0);
  }
  SDValue base = getGlobalAddressWrapper(GA, GV, DAG);
  const TargetData *TD = TM.getTargetData();
  unsigned Size = TD->getTypeAllocSize(Ty);
  SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl),
                               DAG.getConstant(Size, MVT::i32));
  return DAG.getNode(ISD::ADD, dl, MVT::i32, base, offset);
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc DL = Op.getDebugLoc();

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), /*isTarget=*/true);

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  DebugLoc dl = CP->getDebugLoc();
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

static bool
IsWordAlignedBasePlusConstantOffset(SDValue Addr, SDValue &AlignedBase,
                                    int64_t &Offset)
{
  if (Addr.getOpcode() != ISD::ADD) {
    return false;
  }
  ConstantSDNode *CN = 0;
  if (!(CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    return false;
  }
  int64_t off = CN->getSExtValue();
  const SDValue &Base = Addr.getOperand(0);
  const SDValue *Root = &Base;
  if (Base.getOpcode() == ISD::ADD &&
      Base.getOperand(1).getOpcode() == ISD::SHL) {
    ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Base.getOperand(1)
                                                      .getOperand(1));
    if (CN && (CN->getSExtValue() >= 2)) {
      Root = &Base.getOperand(0);
    }
  }
  if (isa<FrameIndexSDNode>(*Root)) {
    // All frame indices are word aligned
    AlignedBase = Base;
    Offset = off;
    return true;
  }
  if (Root->getOpcode() == XCoreISD::DPRelativeWrapper ||
      Root->getOpcode() == XCoreISD::CPRelativeWrapper) {
    // All dp / cp relative addresses are word aligned
    AlignedBase = Base;
    Offset = off;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
    return SDValue();

  unsigned ABIAlignment = getTargetData()->
    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  DebugLoc DL = Op.getDebugLoc();

  SDValue Base;
  int64_t Offset;
  if (!LD->isVolatile() &&
      IsWordAlignedBasePlusConstantOffset(BasePtr, Base, Offset)) {
    if (Offset % 4 == 0) {
      // We've managed to infer better alignment information than the load
      // already has. Use an aligned load.
      //
      return DAG.getLoad(getPointerTy(), DL, Chain, BasePtr,
                         MachinePointerInfo(),
                         false, false, 0);
    }
    // Lower to
    // ldw low, base[offset >> 2]
    // ldw high, base[(offset >> 2) + 1]
    // shr low_shifted, low, (offset & 0x3) * 8
    // shl high_shifted, high, 32 - (offset & 0x3) * 8
    // or result, low_shifted, high_shifted
    SDValue LowOffset = DAG.getConstant(Offset & ~0x3, MVT::i32);
    SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32);
    SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32);
    SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32);

    SDValue LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, LowOffset);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, HighOffset);

    SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
                              LowAddr, MachinePointerInfo(), false, false, 0);
    SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
                               HighAddr, MachinePointerInfo(), false, false, 0);
    SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted,
                                 HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, DL);
  }

  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                 BasePtr, LD->getPointerInfo(), MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                  HighAddr,
                                  LD->getPointerInfo().getWithOffset(2),
                                  MVT::i16, LD->isVolatile(),
                                  LD->isNonTemporal(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  std::pair<SDValue, SDValue> CallResult =
        LowerCallTo(Chain, IntPtrTy, false, false,
                    false, false, 0, CallingConv::C, false,
                    /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
                    Args, DAG, DL);

  SDValue Ops[] =
    { CallResult.first, CallResult.second };

  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
    return SDValue();
  }
  unsigned ABIAlignment = getTargetData()->
    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  DebugLoc dl = Op.getDebugLoc();

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getPointerInfo(), MVT::i16,
                                         ST->isVolatile(), ST->isNonTemporal(),
                                         2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getPointerInfo().getWithOffset(2),
                                          MVT::i16, ST->isVolatile(),
                                          ST->isNonTemporal(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
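  // Both the address and the value are passed as IntPtrTy arguments and the
  // helper returns void, so only the chain of the call is used below.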
  Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  std::pair<SDValue, SDValue> CallResult =
        LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()), false, false,
                    false, false, 0, CallingConv::C, false,
                    /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
                    Args, DAG, dl);

  return CallResult.second;
}

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  DebugLoc dl = Op.getDebugLoc();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  DebugLoc dl = Op.getDebugLoc();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  DebugLoc dl = N->getDebugLoc();
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD) {
    SDValue Result = TryExpandADDWithMul(N, DAG);
    if (Result.getNode() != 0)
      return Result;
  }

  DebugLoc dl = N->getDebugLoc();

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(0, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(1, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(0, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(1, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                              LHSL, RHSL, Zero);
  SDValue Lo(Carry.getNode(), 1);

  SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                                LHSH, RHSH, Carry);
  SDValue Hi(Ignored.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  llvm_unreachable("unimplemented");
  // FIX Arguments passed by reference need an extra dereference.
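  // NOTE: the code below is currently dead (the llvm_unreachable above fires
  // first); it sketches the intended va_arg lowering: load the current
  // va_list pointer, advance it past the argument, store it back, and then
  // load the argument itself.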
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
                               Node->getOperand(1), MachinePointerInfo(V),
                               false, false, 0);
  // Increment the pointer, VAList, to the next vararg
  SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
                             DAG.getConstant(VT.getSizeInBits(),
                                             getPointerTy()));
  // Store the incremented VAList to the legalized pointer
  Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1),
                      MachinePointerInfo(V), false, false, 0);
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
                     false, false, 0);
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc dl = Op.getDebugLoc();
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo(), false, false, 0);
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  DebugLoc dl = Op.getDebugLoc();
  OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, MVT::i32));
  OutChains[1] = DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 4), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, MVT::i32));
  OutChains[2] = DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 8), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, MVT::i32));
  OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
                              MachinePointerInfo(TrmpAddr, 12), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, MVT::i32));
  OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
                              MachinePointerInfo(TrmpAddr, 16), false, false,
                              0);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 5);
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                               CallingConv::ID CallConv, bool isVarArg,
                               bool &isTailCall,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               DebugLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const {
  // XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C implemented
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
                                                      getPointerTy(), true));

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    // Arguments that can be passed in a register are kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //                 = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy(), true),
                             DAG.getConstant(0, getPointerTy(), true),
                             InFlag);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                     CallingConv::ID CallConv, bool isVarArg,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     DebugLoc dl, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg>
                                            &Ins,
                                          DebugLoc dl,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals)
                                            const {
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg>
                                         &Ins,
                                       DebugLoc dl,
                                       SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getSimpleVT().SimpleTy << "\n";
#endif
          llvm_unreachable(0);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(
                          XCore::GRRegsRegisterClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      }
    } else {
      // sanity check
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                                   MachinePointerInfo::getFixedStack(FI),
                                   false, false, 0));
    }
  }

  if (isVarArg) {
    /* Argument registers */
    static const unsigned ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                     array_lengthof(ArgRegs));
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      SmallVector<SDValue, 4> MemOps;
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI->CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(
                          XCore::GRRegsRegisterClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        // Move argument from virt reg -> stack
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                     MachinePointerInfo(), false, false, 0);
        MemOps.push_back(Store);
      }
      if (!MemOps.empty())
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &MemOps[0], MemOps.size());
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                               true));
    }
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_XCore);
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 DebugLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             OutVals[i], Flag);

    // guarantee that all emitted copies are
    // stuck together, avoiding something bad
    Flag = Chain.getValue(1);
  }

  // Return on XCore is always a "retsp 0"
  if (Flag.getNode())
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32), Flag);
  else // Return Void
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32));
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
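  // Here BRFT_lru6 branches to sinkMBB when the condition register is true,
  // so copy0MBB (the fallthrough block) supplies the false value to the PHI
  // created in sinkMBB below.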
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl,
          TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
  default: break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, VT));
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only
    // the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
      if (KnownZero == Mask) {
        SDValue Carry = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Carry, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
      if (KnownZero == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, VT), N2);
        SDValue Ops[] = { Borrow, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only
    // the low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
      if (KnownZero == Mask) {
        SDValue Borrow = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Borrow, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b)
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, 2, dl);
      }
      // Otherwise fold to ladd(a, b, 0)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    if (StoreBits % 8) {
      break;
    }
    unsigned ABIAlignment = getTargetData()->getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, MVT::i32),
                              Alignment, false, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}

void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                         const APInt &Mask,
                                                         APInt &KnownZero,
                                                         APInt &KnownOne,
                                                         const SelectionDAG &DAG,
                                                         unsigned Depth) const {
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 0) {
      // Top bits of carry / borrow are clear.
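      // The first result of LADD/LSUB is the carry/borrow out, which is
      // either 0 or 1, so every bit above bit 0 is known to be zero.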
      KnownZero = APInt::getHighBitsSet(Mask.getBitWidth(),
                                        Mask.getBitWidth() - 1);
      KnownZero &= Mask;
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val%2 == 0 && isImmUs(val/2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val%4 == 0 && isImmUs(val/4));
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           Type *Ty) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  const TargetData *TD = TM.getTargetData();
  unsigned Size = TD->getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
           AM.BaseOffs%4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                           XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass*>
XCoreTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             EVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, XCore::GRRegsRegisterClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}