//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARM.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperDYN,   // WrapperDYN - A wrapper node for TargetGlobalAddress in
                    // DYN mode.
      WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                    // PIC mode.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      // Add pseudo op to model memcpy for struct byval.
      COPY_STRUCT_BYVAL,

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMN,          // ARM CMN instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.

      CMOV,         // ARM conditional move instructions.

      BCC_i64,

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      ADDC,         // Add with carry
      ADDE,         // Add using carry
      SUBC,         // Sub with carry
      SUBE,         // Sub using carry

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,   // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,  // SjLj exception handling longjmp.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER,   // Memory barrier (DMB)
      MEMBARRIER_MCR, // Memory barrier (MCR)

      PRELOAD,      // Preload

      VCEQ,         // Vector compare equal.
      VCEQZ,        // Vector compare equal to zero.
      VCGE,         // Vector compare greater than or equal.
      VCGEZ,        // Vector compare greater than or equal to zero.
      VCLEZ,        // Vector compare less than or equal to zero.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTZ,        // Vector compare greater than zero.
      VCLTZ,        // Vector compare less than zero.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector move f32 immediate:
      VMOVFPIMM,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose
      VTBL1,        // 1-register shuffle with mask
      VTBL2,        // 2-register shuffle with mask

      // Vector multiply long:
      VMULLs,       // ...signed
      VMULLu,       // ...unsigned

      UMLAL,        // 64-bit Unsigned Multiply-Accumulate
      SMLAL,        // 64-bit Signed Multiply-Accumulate

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN,

      // Bit-field insert
      BFI,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Vector load N-element structure to all lanes:
      VLD2DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD3DUP,
      VLD4DUP,

      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,

      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD,

      // 64-bit atomic ops (value split into two registers)
      ATOMADD64_DAG,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMXOR64_DAG,
      ATOMAND64_DAG,
      ATOMNAND64_DAG,
      ATOMSWAP64_DAG,
      ATOMCMPXCHG64_DAG,
      ATOMMIN64_DAG,
      ATOMUMIN64_DAG,
      ATOMMAX64_DAG,
      ATOMUMAX64_DAG
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    bool isBitFieldInvertedMask(unsigned v);
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    ///
    virtual void ReplaceNodeResults(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual bool isSelectSupported(SelectSupportKind Kind) const {
      // ARM does not support scalar condition selects on vectors.
      return (Kind != ScalarCondVectorVal);
    }

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    virtual void
      AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;

    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;

    virtual EVT getOptimalMemOpType(uint64_t Size,
                                    unsigned DstAlign, unsigned SrcAlign,
                                    bool IsMemset, bool ZeroMemset,
                                    bool MemcpyStrSrc,
                                    MachineFunction &MF) const;

    using TargetLowering::isZExtFree;
    virtual bool isZExtFree(SDValue Val, EVT VT2) const;

    virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
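    /// (Illustrative examples, not an exhaustive specification: a base
    /// register plus a small immediate offset, e.g. [r1, #4], is typically
    /// legal, while a scaled index that no ARM addressing mode can encode,
    /// such as base + 3*index, is not.)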
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;

    /// getPreIndexedAddressParts - Returns true by value, and the base
    /// pointer, offset pointer, and addressing mode by reference, if the
    /// node's address can be legally represented as a pre-indexed load /
    /// store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - Returns true by value, and the base
    /// pointer, offset pointer, and addressing mode by reference, if this
    /// node can be combined with a load / store to form a post-indexed
    /// load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight
    /// value. The operand object must already have been set up with the
    /// operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   MVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the
    /// Ops vector. If it is invalid, don't add anything to Ops. If hasMemory
    /// is true, it means one of the asm constraints of the inline asm
    /// instruction being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    const ARMSubtarget *getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual const TargetRegisterClass *getRegClassFor(MVT VT) const;

    /// getMaximalGlobalOffset - Returns the maximal possible offset which can
    /// be used for loads / stores from the global.
    virtual unsigned getMaximalGlobalOffset() const;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
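    /// (FastISel is normally only used for unoptimized, -O0 style code
    /// generation; the matching free function is declared in the ARM
    /// namespace at the end of this file.)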
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                                     const TargetLibraryInfo *libInfo) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                                    const CallInst &I,
                                    unsigned Intrinsic) const;
  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
      findRepresentativeClass(MVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    ///
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVectorImpl<SDValue> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 SDLoc dl) const;

    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG,
                                 TLSModel::Model model) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;
    SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true, otherwise fmuladd is
    /// expanded to fmul + fadd.
    ///
    /// ARM supports both fused and unfused multiply-add operations; we already
    /// lower a pair of fmul and fadd to the latter so it's not clear that
    /// there would be a gain or that the gain would be worthwhile enough to
    /// risk correctness bugs.
    virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const { return false; }

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals,
                            bool isThisReturn, SDValue ThisVal) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           SDLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                       SDLoc dl, SDValue &Chain,
                       const Value *OrigArg,
                       unsigned InRegsParamRecordIdx,
                       unsigned OffsetFromOrigArg,
                       unsigned ArgOffset,
                       unsigned ArgSize,
                       bool ForceMutable) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              SDLoc dl, SDValue &Chain,
                              unsigned ArgOffset,
                              bool ForceMutable = false) const;

    void computeRegArea(CCState &CCInfo, MachineFunction &MF,
                        unsigned InRegsParamRecordIdx,
                        unsigned ArgSize,
                        unsigned &ArgRegsSize,
                        unsigned &ArgRegsSaveSize) const;

    virtual SDValue
      LowerCall(TargetLowering::CallLoweringInfo &CLI,
                SmallVectorImpl<SDValue> &InVals) const;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    virtual void HandleByVal(CCState *, unsigned &, unsigned) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
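    /// (For example, a mismatch in struct-return semantics between caller and
    /// callee - see the isCalleeStructRet / isCallerStructRet parameters
    /// below - can disqualify a call from tail call optimization.)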
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG &DAG) const;

    virtual bool CanLowerReturn(CallingConv::ID CallConv,
                                MachineFunction &MF, bool isVarArg,
                                const SmallVectorImpl<ISD::OutputArg> &Outs,
                                LLVMContext &Context) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  SDLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, SDLoc dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
    MachineBasicBlock *EmitAtomicBinary64(MachineInstr *MI,
                                          MachineBasicBlock *BB,
                                          unsigned Op1,
                                          unsigned Op2,
                                          bool NeedsCarry = false,
                                          bool IsCmpxchg = false,
                                          bool IsMinMax = false,
                                          ARMCC::CondCodes CC = ARMCC::AL) const;
    MachineBasicBlock *EmitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              bool signExtend,
                                              ARMCC::CondCodes Cond) const;

    void SetupEntryBlockForSjLj(MachineInstr *MI,
                                MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    MachineBasicBlock *EmitSjLjDispatchBlock(MachineInstr *MI,
                                             MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitStructByval(MachineInstr *MI,
                                       MachineBasicBlock *MBB) const;
  };

  enum NEONModImmType {
    VMOVModImm,
    VMVNModImm,
    OtherModImm
  };

  namespace ARM {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
}

#endif // ARMISELLOWERING_H