//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H
#define LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H

#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "PPC.h"
#include "PPCSubtarget.h"

namespace llvm {
  namespace PPCISD {
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// FSEL - Traditional three-operand fsel node.
      ///
      FSEL,

      /// FCFID - The FCFID instruction, taking an f64 operand and producing
      /// an f64 value containing the FP representation of the integer that
      /// was temporarily in the f64 operand.
      FCFID,

      /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
      /// operand, producing an f64 value containing the integer representation
      /// of that FP value.
      FCTIDZ, FCTIWZ,

      /// STFIWX - The STFIWX instruction.  The first operand is an input token
      /// chain, then an f64 value to store, then an address to store it to.
      STFIWX,

      // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
      // three v4f32 operands and producing a v4f32 result.
      VMADDFP, VNMSUBFP,

      /// VPERM - The PPC VPERM Instruction.
      ///
      VPERM,

      /// Hi/Lo - These represent the high and low 16-bit parts of a global
      /// address respectively.  These nodes have two operands, the first of
      /// which must be a TargetGlobalAddress, and the second of which must be a
      /// Constant.  Selected naively, these turn into 'lis G+C' and 'li G+C',
      /// though these are usually folded into other nodes.
      Hi, Lo,
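
      // Illustrative sketch only (an assumption about typical use, not part
      // of this interface): a lowering routine would usually compose Hi/Lo
      // around a TargetGlobalAddress roughly as
      //
      //   SDValue GA = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
      //   SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, GA, Zero);
      //   SDValue Lo = DAG.getNode(PPCISD::Lo, dl, PtrVT, GA, Zero);
      //   SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
      //
      // (GV, Zero, and PtrVT are placeholder names) which then selects to a
      // lis/addi style instruction pair.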

      TOC_ENTRY,

      /// The following three target-specific nodes are used for calls through
      /// function pointers in the 64-bit SVR4 ABI.

      /// Restore the TOC from the TOC save area of the current stack frame.
      /// This is basically a hard coded load instruction which additionally
      /// takes/produces a flag.
      TOC_RESTORE,

      /// Like a regular LOAD but additionally taking/producing a flag.
      LOAD,

      /// LOAD into r2 (also taking/producing a flag).  Like TOC_RESTORE, this
      /// is a hard coded load instruction.
      LOAD_TOC,

      /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
      /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
      /// compute an allocation on the stack.
      DYNALLOC,

      /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// These nodes represent the 32-bit PPC shifts that operate on 6-bit
      /// shift amounts.  These nodes are generated by the multi-precision shift
      /// code.
      SRL, SRA, SHL,

      /// EXTSW_32 - This is the EXTSW instruction for use with "32-bit"
      /// registers.
      EXTSW_32,

      /// CALL - A direct function call.
      CALL_Darwin, CALL_SVR4,

      /// NOP - Special NOP which follows 64-bit SVR4 calls.
      NOP,

      /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
      /// MTCTR instruction.
      MTCTR,

      /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
      /// BCTRL instruction.
      BCTRL_Darwin, BCTRL_SVR4,

      /// Return with a flag operand, matched by 'blr'
      RET_FLAG,

      /// R32 = MFCR(CRREG, INFLAG) - Represents the MFCRpseud/MFOCRF
      /// instructions.  This copies the bits corresponding to the specified
      /// CRREG into the resultant GPR.  Bits corresponding to other CR regs
      /// are undefined.
      MFCR,

      /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
      /// instructions.  For lack of a better number, we use the opcode number
      /// encoding for the OPC field to identify the compare.  For example, 838
      /// is VCMPGTSH.
      VCMP,

      /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
      /// altivec VCMP*o instructions.  For lack of a better number, we use the
      /// opcode number encoding for the OPC field to identify the compare.  For
      /// example, 838 is VCMPGTSH.
      VCMPo,

      /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
      /// corresponds to the COND_BRANCH pseudo instruction.  CRRC is the
      /// condition register to branch on, OPC is the branch opcode to use (e.g.
      /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
      /// an optional input flag argument.
      COND_BRANCH,

      // The following 5 instructions are used only as part of the
      // long double-to-int conversion sequence.

      /// OUTFLAG = MFFS F8RC - This moves the FPSCR (not modelled) into the
      /// register.
      MFFS,

      /// OUTFLAG = MTFSB0 INFLAG - This clears a bit in the FPSCR.
      MTFSB0,

      /// OUTFLAG = MTFSB1 INFLAG - This sets a bit in the FPSCR.
      MTFSB1,

      /// F8RC, OUTFLAG = FADDRTZ F8RC, F8RC, INFLAG - This is an FADD done with
      /// rounding towards zero.  It has flags added so it won't move past the
      /// FPSCR-setting instructions.
      FADDRTZ,

      /// MTFSF = F8RC, INFLAG - This moves the register into the FPSCR.
      MTFSF,

      /// LARX = This corresponds to the PPC l{w|d}arx instruction: load and
      /// reserve indexed.  This is used to implement atomic operations.
      LARX,

      /// STCX = This corresponds to the PPC stcx. instruction: store
      /// conditional indexed.  This is used to implement atomic operations.
      STCX,
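
      // Illustrative sketch only (not part of this interface): LARX/STCX pair
      // up in the usual PPC atomic read-modify-write loop, e.g. for a
      // word-sized atomic add (register names are placeholders):
      //
      //   loop:
      //     lwarx  rTmp, 0, rPtr
      //     add    rTmp, rTmp, rVal
      //     stwcx. rTmp, 0, rPtr
      //     bne-   loop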

      /// TC_RETURN - A tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      /// STD_32 - This is the STD instruction for use with "32-bit" registers.
      STD_32 = ISD::FIRST_TARGET_MEMORY_OPCODE,

      /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
      /// byte-swapping store instruction.  It byte-swaps the low "Type" bits of
      /// the GPRC input, then stores it through Ptr.  Type can be either i16 or
      /// i32.
      STBRX,

      /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
      /// byte-swapping load instruction.  It loads "Type" bits, byte swaps it,
      /// then puts it in the bottom bits of the GPRC.  TYPE can be either i16
      /// or i32.
      LBRX
    };
  }

  /// Define some predicates that are used for node matching.
  namespace PPC {
    /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUHUM instruction.
    bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);

    /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUWUM instruction.
    bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);

    /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
    bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            bool isUnary);

    /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
    /// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
    bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            bool isUnary);

    /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
    /// amount, otherwise return -1.
    int isVSLDOIShuffleMask(SDNode *N, bool isUnary);

    /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a splat of a single element that is suitable for input to
    /// VSPLTB/VSPLTH/VSPLTW.
    bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);

    /// isAllNegativeZeroVector - Returns true if all elements of build_vector
    /// are -0.0.
    bool isAllNegativeZeroVector(SDNode *N);

    /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
    /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
    unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize);

    /// get_VSPLTI_elt - If this is a build_vector of constants which can be
    /// formed by using a vspltis[bhw] instruction of the specified element
    /// size, return the constant being splatted.  The ByteSize field indicates
    /// the number of bytes of each element [124] -> [bhw].
    SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
  }
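
  // Illustrative sketch only (an assumption about typical use, not part of
  // this interface): these predicates are normally queried when deciding how
  // to lower or select a VECTOR_SHUFFLE, along the lines of
  //
  //   if (PPC::isSplatShuffleMask(SVOp, /*EltSize=*/4)) {
  //     unsigned Imm = PPC::getVSPLTImmediate(SVOp, 4);
  //     // ... emit a VSPLTW splatting element Imm ...
  //   }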

  class PPCTargetLowering : public TargetLowering {
    const PPCSubtarget &PPCSubTarget;

  public:
    explicit PPCTargetLowering(PPCTargetMachine &TM);

    /// getTargetNodeName() - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i32; }

    /// getSetCCResultType - Return the ISD::SETCC ValueType
    virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as a pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// SelectAddressRegReg - Given the specified address, check to see if it
    /// can be represented as an indexed [r+r] operation.  Returns false if it
    /// can be more efficiently represented with [r+imm].
    bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                             SelectionDAG &DAG) const;

    /// SelectAddressRegImm - Returns true if the address N can be represented
    /// by a base register plus a signed 16-bit displacement [r+imm], and if it
    /// is not better represented as reg+reg.
    bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                             SelectionDAG &DAG) const;

    /// SelectAddressRegRegOnly - Given the specified address, force it to be
    /// represented as an indexed [r+r] operation.
    bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                                 SelectionDAG &DAG) const;

    /// SelectAddressRegImmShift - Returns true if the address N can be
    /// represented by a base register plus a signed 14-bit displacement
    /// [r+imm*4].  Suitable for use by STD and friends.
    bool SelectAddressRegImmShift(SDValue N, SDValue &Disp, SDValue &Base,
                                  SelectionDAG &DAG) const;
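
    // Illustrative example only (not part of this interface): a small
    // constant offset typically satisfies SelectAddressRegImm and selects to
    // the displacement form
    //   lwz r3, 8(r4)
    // while a variable index is matched by SelectAddressRegReg and selects to
    // the indexed form
    //   lwzx r3, r4, r5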

    /// LowerOperation - Provide custom lowering hooks for some operations.
    ///
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *MBB, bool is64Bit,
                                        unsigned BinOpcode) const;
    MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr *MI,
                                                MachineBasicBlock *MBB,
                                                bool is8bit,
                                                unsigned Opcode) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;

    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area.  This is the actual
    /// alignment, not its logarithm.
    unsigned getByValTypeAlignment(Type *Ty) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector.  If it is invalid, don't add anything to Ops.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;

    /// isLegalAddressImmediate - Return true if the integer value can be used
    /// as the offset of the target addressing mode for a load / store of the
    /// given type.
    virtual bool isLegalAddressImmediate(int64_t V, Type *Ty) const;

    /// isLegalAddressImmediate - Return true if the GlobalValue can be used as
    /// the offset of the target addressing mode.
    virtual bool isLegalAddressImmediate(GlobalValue *GV) const;

    virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering.  If DstAlign is zero, the destination alignment can satisfy
    /// any constraint.  Similarly, if SrcAlign is zero there is no need to
    /// check it against an alignment requirement, probably because the source
    /// does not need to be loaded.  If 'NonScalarIntSafe' is true, it is safe
    /// to return a non-scalar-integer type, e.g. for an empty string source, a
    /// constant, or a source loaded from memory.  'MemcpyStrSrc' indicates
    /// whether the memcpy source is constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool NonScalarIntSafe, bool MemcpyStrSrc,
                        MachineFunction &MF) const;
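
    // Illustrative note only (a hypothetical example, not a statement about
    // this target's actual policy): an override might return MVT::v4f32 for
    // large, 16-byte-aligned copies on an Altivec-capable subtarget so the
    // memcpy expands to vector loads/stores, and MVT::Other otherwise to fall
    // back to the generic logic.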

  private:
    SDValue getFramePointerFrameIndex(SelectionDAG &DAG) const;
    SDValue getReturnAddrFrameIndex(SelectionDAG &DAG) const;

    bool
    IsEligibleForTailCallOptimization(SDValue Callee,
                                      CallingConv::ID CalleeCC,
                                      bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SelectionDAG &DAG) const;

    SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG,
                                         int SPDiff,
                                         SDValue Chain,
                                         SDValue &LROpOut,
                                         SDValue &FPOpOut,
                                         bool isDarwinABI,
                                         DebugLoc dl) const;

    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
                         const PPCSubtarget &Subtarget) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG,
                       const PPCSubtarget &Subtarget) const;
    SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG,
                              const PPCSubtarget &Subtarget) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
                                    const PPCSubtarget &Subtarget) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue FinishCall(CallingConv::ID CallConv, DebugLoc dl, bool isTailCall,
                       bool isVarArg,
                       SelectionDAG &DAG,
                       SmallVector<std::pair<unsigned, SDValue>, 8>
                         &RegsToPass,
                       SDValue InFlag, SDValue Chain,
                       SDValue &Callee,
                       int SPDiff, unsigned NumBytes,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    SDValue
      LowerFormalArguments_Darwin(SDValue Chain,
                                  CallingConv::ID CallConv, bool isVarArg,
                                  const SmallVectorImpl<ISD::InputArg> &Ins,
                                  DebugLoc dl, SelectionDAG &DAG,
                                  SmallVectorImpl<SDValue> &InVals) const;
    SDValue
      LowerFormalArguments_SVR4(SDValue Chain,
                                CallingConv::ID CallConv, bool isVarArg,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                DebugLoc dl, SelectionDAG &DAG,
                                SmallVectorImpl<SDValue> &InVals) const;

    SDValue
      LowerCall_Darwin(SDValue Chain, SDValue Callee,
                       CallingConv::ID CallConv, bool isVarArg,
                       bool isTailCall,
                       const SmallVectorImpl<ISD::OutputArg> &Outs,
                       const SmallVectorImpl<SDValue> &OutVals,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       DebugLoc dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const;
    SDValue
      LowerCall_SVR4(SDValue Chain, SDValue Callee,
                     CallingConv::ID CallConv, bool isVarArg,
                     bool isTailCall,
                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                     const SmallVectorImpl<SDValue> &OutVals,
                     const SmallVectorImpl<ISD::InputArg> &Ins,
                     DebugLoc dl, SelectionDAG &DAG,
                     SmallVectorImpl<SDValue> &InVals) const;
  };
}

#endif   // LLVM_TARGET_POWERPC_PPC32ISELLOWERING_H