//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;
  class ARMSubtarget;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      // Add pseudo op to model memcpy for struct byval.
      COPY_STRUCT_BYVAL,

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.
      INTRET_FLAG,  // Interrupt return with an LR-offset and a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMN,          // ARM CMN instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.

      CMOV,         // ARM conditional move instructions.

      BCC_i64,

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      ADDC,         // Add with carry
      ADDE,         // Add using carry
      SUBC,         // Sub with carry
      SUBE,         // Sub using carry

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,   // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,  // SjLj exception handling longjmp.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload

      WIN__CHKSTK,  // Windows' __chkstk call to do stack probing.

      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector move f32 immediate:
      VMOVFPIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose
      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      UMLAL,        // 64bit Unsigned Accumulate Multiply
      SMLAL,        // 64bit Signed Accumulate Multiply

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,
      VMAXNM,
      VMINNM,

      // Bit-field insert
      BFI,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP,

      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,

      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    bool isBitFieldInvertedMask(unsigned v);
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    unsigned getJumpTableEncoding() const override;

    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    const char *getTargetNodeName(unsigned Opcode) const override;

    bool isSelectSupported(SelectSupportKind Kind) const override {
      // ARM does not support scalar condition selects on vectors.
      return (Kind != ScalarCondVectorVal);
    }

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;

    MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const override;

    void AdjustInstrPostInstrSelection(MachineInstr *MI,
                                       SDNode *Node) const override;

    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
    bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                       bool *Fast) const override;

    EVT getOptimalMemOpType(uint64_t Size,
                            unsigned DstAlign, unsigned SrcAlign,
                            bool IsMemset, bool ZeroMemset,
                            bool MemcpyStrSrc,
                            MachineFunction &MF) const override;

    using TargetLowering::isZExtFree;
    bool isZExtFree(SDValue Val, EVT VT2) const override;

    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;
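
    // Illustrative sketch only (the precise rules live in the .cpp file): on
    // ARM/Thumb2, a compare or add immediate is typically legal when it (or,
    // for compares, its negation) fits the modified-immediate encoding, i.e.
    // an 8-bit value rotated right by an even amount.  Expected behaviour:
    //   isLegalICmpImmediate(255);        // true: encodable directly
    //   isLegalICmpImmediate(0x12345678); // false: would need movw/movt or a
    //                                     // constant-pool load first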

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                   ISD::MemIndexedMode &AM,
                                   SelectionDAG &DAG) const override;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                    SDValue &Offset, ISD::MemIndexedMode &AM,
                                    SelectionDAG &DAG) const override;

    void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                       APInt &KnownOne,
                                       const SelectionDAG &DAG,
                                       unsigned Depth) const override;

    bool ExpandInlineAsm(CallInst *CI) const override;

    ConstraintType
      getConstraintType(const std::string &Constraint) const override;

    /// Examine constraint string and operand type and determine a weight
    /// value. The operand object must already have been set up with the
    /// operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   MVT VT) const override;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    const TargetRegisterClass *getRegClassFor(MVT VT) const override;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    unsigned getMaximalGlobalOffset() const override;

    /// Returns true if a cast between SrcAS and DestAS is a noop.
    bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
      // Addrspacecasts are always noops.
      return true;
    }

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo) const override;

    Sched::Preference getSchedulingPreference(SDNode *N) const override;

    bool
    isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
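
    // Rough sketch of the intent, not the exact implementation: with VFPv3,
    // VMOV (immediate) can materialize a small set of constants encoded in
    // 8 bits, so values such as 1.0, 0.5 and 2.0 are usually legal here,
    // while a constant like 0.1 is not and is loaded from a constant pool.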

    bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                            const CallInst &I,
                            unsigned Intrinsic) const override;

    /// \brief Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    /// \brief Returns true if an argument of type Ty needs to be passed in a
    /// contiguous block of registers in calling convention CallConv.
    bool functionArgumentNeedsConsecutiveRegisters(
        Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;

    Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                          AtomicOrdering Ord) const override;
    Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                Value *Addr, AtomicOrdering Ord) const override;

    bool shouldExpandAtomicInIR(Instruction *Inst) const override;

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(MVT VT) const override;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    ///
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);
    std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                              SDValue &ARMcc) const;

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVectorImpl<SDValue> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 SDLoc dl) const;

    CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                            bool isVarArg) const;
    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG,
                                 TLSModel::Model model) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;
    SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;

    unsigned getRegisterByName(const char* RegName, EVT VT) const override;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true, otherwise fmuladd is
    /// expanded to fmul + fadd.
    ///
    /// ARM supports both fused and unfused multiply-add operations; we already
    /// lower a pair of fmul and fadd to the latter so it's not clear that
    /// there would be a gain or that the gain would be worthwhile enough to
    /// risk correctness bugs.
    bool isFMAFasterThanFMulAndFAdd(EVT VT) const override { return false; }

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals,
                            bool isThisReturn, SDValue ThisVal) const;

    SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           SDLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const override;

    int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                       SDLoc dl, SDValue &Chain,
                       const Value *OrigArg,
                       unsigned InRegsParamRecordIdx,
                       unsigned OffsetFromOrigArg,
                       unsigned ArgOffset,
                       unsigned ArgSize,
                       bool ForceMutable,
                       unsigned ByValStoreOffset,
                       unsigned TotalArgRegsSaveSize) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              SDLoc dl, SDValue &Chain,
                              unsigned ArgOffset,
                              unsigned TotalArgRegsSaveSize,
                              bool ForceMutable = false) const;

    void computeRegArea(CCState &CCInfo, MachineFunction &MF,
                        unsigned InRegsParamRecordIdx,
                        unsigned ArgSize,
                        unsigned &ArgRegsSize,
                        unsigned &ArgRegsSaveSize) const;

    SDValue
      LowerCall(TargetLowering::CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const override;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    void HandleByVal(CCState *, unsigned &, unsigned) const override;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG &DAG) const;

    bool CanLowerReturn(CallingConv::ID CallConv,
                        MachineFunction &MF, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context) const override;

    SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  SDLoc dl, SelectionDAG &DAG) const override;

    bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

    bool mayBeEmittedAsTailCall(CallInst *CI) const override;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, SDLoc dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    void SetupEntryBlockForSjLj(MachineInstr *MI,
                                MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr *MI,
                                             MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitStructByval(MachineInstr *MI,
                                       MachineBasicBlock *MBB) const;

    MachineBasicBlock *EmitLowered__chkstk(MachineInstr *MI,
                                           MachineBasicBlock *MBB) const;
  };

  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
}

#endif  // ARMISELLOWERING_H