//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARMISelLowering.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

namespace {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs,
               LLVMContext &C, ParmContext PC)
        : CCState(CC, isVarArg, MF, TM, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call "
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}

// The APCS parameter registers.
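// (R0-R3; AAPCS likewise passes the first four argument words in these core
// registers, so the same array serves both ABIs.)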
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR, VT, Promote);
    AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
}

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return new TargetLoweringObjectFileMachO();
  if (TT.isOSWindows())
    return new TargetLoweringObjectFileCOFF();
  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
    : TargetLowering(TM, createTLOF(Triple(TM.getTargetTriple()))) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
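    // The *vfp variants below are hard-float helpers: they pass and return
    // their floating-point operands in VFP registers rather than core
    // registers.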
    if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
        Subtarget->hasARMOps() && !TM.Options.UseSoftFloat) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32, "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32, "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64, "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64, "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
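  // Registering a null name tells the legalizer that no runtime helper
  // exists, so 128-bit shifts are expanded inline instead of via libcall.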
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);

  if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetMachO() &&
      !Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // Double-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 2
      { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Double-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 3
      { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::O_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },

      // Single-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 4
      { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Single-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 5
      { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::O_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },

      // Floating-point to integer conversions.
      // RTABI chapter 4.1.2, Table 6
      { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Conversions between floating types.
      // RTABI chapter 4.1.2, Table 7
      { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer to floating-point conversions.
      // RTABI chapter 4.1.2, Table 8
      { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Long long helper functions
      // RTABI chapter 4.2, Table 9
      { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer division functions
      // RTABI chapter 4.3.1
      { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Memory operations
      // RTABI chapter 4.3.4
      { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
      if (LC.Cond != ISD::SETCC_INVALID)
        setCmpLibcallCC(LC.Op, LC.Cond);
    }
  }

  if (Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->getTargetTriple().isiOS() &&
      !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);
  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, &ARM::DPRRegClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction((MVT::SimpleValueType)VT,
                          (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);

    setOperationAction(ISD::MULHS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::MULHU, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);

    setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    // The same applies to v4f32, though vadd, vsub, and vmul are natively
    // supported for v4f32.
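    // With the operations below marked Expand, v2f64 arithmetic is
    // scalarized by the legalizer into f64 VFP operations; only moves,
    // loads, and stores stay vector-valued.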
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a case where "copysign" appears in the DAG with
    // vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);

    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions.
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // NEON does not have single-instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type wider than the source, nor an FP_TO_[SU]INT
    // instruction with a destination narrower than the source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // NEON does not have single instruction CTPOP for vectors with element
    // types wider than 8-bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);

    // NEON only has FMA instructions as of VFP4.
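    // Without VFP4's vfma/vfms the vector FMA nodes are expanded; scalar FMA
    // is likewise expanded further down, so these typically end up as
    // fma/fmaf libcalls.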
    if (!Subtarget->hasVFP4()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
                  MVT::v4i16, MVT::v2i16,
                  MVT::v2i32};
    for (unsigned i = 0; i < 6; ++i) {
      setLoadExtAction(ISD::EXTLOAD, Tys[i], Legal);
      setLoadExtAction(ISD::ZEXTLOAD, Tys[i], Legal);
      setLoadExtAction(ISD::SEXTLOAD, Tys[i], Legal);
    }
  }

  // ARM and Thumb2 support UMLAL/SMLAL.
  if (!Subtarget->isThumb1Only())
    setTargetDAGCombine(ISD::ADDC);

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC, MVT::i32, Custom);
    setOperationAction(ISD::ADDE, MVT::i32, Custom);
    setOperationAction(ISD::SUBC, MVT::i32, Custom);
    setOperationAction(ISD::SUBE, MVT::i32, Custom);
  }

  // ARM does not have ROTL.
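  // ROTR is available (the ror shifted operand), and a left-rotate by n is
  // equivalent to a right-rotate by 32 - n, so Expand is cheap here.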
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // These just redirect to CTTZ and CTLZ on ARM.
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) &&
      !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) {
    // These are expanded into libcalls if the CPU doesn't have a HW divider.
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }

  // FIXME: Also set divmod for SREM on EABI.
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  // Register based DivRem for AEABI (RTABI 4.2)
  if (Subtarget->isTargetAEABI()) {
    setLibcallName(RTLIB::SDIVREM_I8, "__aeabi_idivmod");
    setLibcallName(RTLIB::SDIVREM_I16, "__aeabi_idivmod");
    setLibcallName(RTLIB::SDIVREM_I32, "__aeabi_idivmod");
    setLibcallName(RTLIB::SDIVREM_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIVREM_I8, "__aeabi_uidivmod");
    setLibcallName(RTLIB::UDIVREM_I16, "__aeabi_uidivmod");
    setLibcallName(RTLIB::UDIVREM_I32, "__aeabi_uidivmod");
    setLibcallName(RTLIB::UDIVREM_I64, "__aeabi_uldivmod");

    setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS);

    setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  }

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (!Subtarget->isTargetMachO()) {
    // Non-MachO platforms may return values in these registers via the
    // personality function.
    setExceptionPointerRegister(ARM::R0);
    setExceptionSelectorRegister(ARM::R1);
  }

  if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only()) {
    // ATOMIC_FENCE needs custom lowering; the others should have been expanded
    // to ldrex/strex loops already.
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

    // On v8, we have particularly efficient implementations of atomic fences
    // if they can be combined with nearby atomic loads and stores.
    if (!Subtarget->hasV8Ops()) {
      // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
      setInsertFencesForAtomic(true);
    }
  } else {
    // If there's anything we can use as a barrier, go through custom lowering
    // for ATOMIC_FENCE.
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other,
                       Subtarget->hasAnyDataBarrier() ? Custom : Expand);

    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  }

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  if (!Subtarget->hasVFP4()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  }

  // Various VFP goodness.
  if (!TM.Options.UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // Combine sin / cos into one node or libcall if possible.
  if (Subtarget->hasSinCos()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    if (Subtarget->getTargetTriple().getOS() == Triple::IOS) {
      // For iOS, we don't want the normal expansion of a libcall to sincos;
      // we want to issue a libcall to __sincos_stret instead.
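      // __sincos_stret computes sine and cosine with a single call and
      // returns both results together, avoiding two separate libcalls.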
      setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
      setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);

  if (Subtarget->hasV6Ops())
    setTargetDAGCombine(ISD::SRL);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (TM.Options.UseSoftFloat || Subtarget->isThumb1Only() ||
      !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  MaxStoresPerMemset = 8;
  MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
  MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 4 : 2;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  // Prefer likely predicted branches to selects on out-of-order cores.
  PredictableSelectIsExpensive = Subtarget->isLikeA9();

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = &ARM::DPRRegClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = &ARM::DPRRegClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = &ARM::DPRRegClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL: return "ARMISD::tCALL";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMN: return "ARMISD::CMN";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";

  case ARMISD::CMOV: return "ARMISD::CMOV";

  case ARMISD::RBIT: return "ARMISD::RBIT";

  case ARMISD::FTOSI: return "ARMISD::FTOSI";
  case ARMISD::FTOUI: return "ARMISD::FTOUI";
  case ARMISD::SITOF: return "ARMISD::SITOF";
  case ARMISD::UITOF: return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::ADDC: return "ARMISD::ADDC";
  case ARMISD::ADDE: return "ARMISD::ADDE";
  case ARMISD::SUBC: return "ARMISD::SUBC";
  case ARMISD::SUBE: return "ARMISD::SUBE";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return "ARMISD::VSHL";
  case ARMISD::VSHRs: return "ARMISD::VSHRs";
  case ARMISD::VSHRu: return "ARMISD::VSHRu";
  case ARMISD::VRSHRs: return "ARMISD::VRSHRs";
  case ARMISD::VRSHRu: return "ARMISD::VRSHRu";
  case ARMISD::VRSHRN: return "ARMISD::VRSHRN";
  case ARMISD::VQSHLs: return "ARMISD::VQSHLs";
  case ARMISD::VQSHLu: return "ARMISD::VQSHLu";
  case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu";
  case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs";
  case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu";
  case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu";
  case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs";
  case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu";
  case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu";
  case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
  case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM";
  case ARMISD::VDUP: return "ARMISD::VDUP";
  case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
  case ARMISD::VEXT: return "ARMISD::VEXT";
  case ARMISD::VREV64: return "ARMISD::VREV64";
  case ARMISD::VREV32: return "ARMISD::VREV32";
  case ARMISD::VREV16: return "ARMISD::VREV16";
  case ARMISD::VZIP: return "ARMISD::VZIP";
  case ARMISD::VUZP: return "ARMISD::VUZP";
  case ARMISD::VTRN: return "ARMISD::VTRN";
  case ARMISD::VTBL1: return "ARMISD::VTBL1";
  case ARMISD::VTBL2: return "ARMISD::VTBL2";
  case ARMISD::VMULLs: return "ARMISD::VMULLs";
  case ARMISD::VMULLu: return "ARMISD::VMULLu";
  case ARMISD::UMLAL: return "ARMISD::UMLAL";
  case ARMISD::SMLAL: return "ARMISD::SMLAL";
  case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
  case ARMISD::FMAX: return "ARMISD::FMAX";
  case ARMISD::FMIN: return "ARMISD::FMIN";
  case ARMISD::VMAXNM: return "ARMISD::VMAXNM";
  case ARMISD::VMINNM: return "ARMISD::VMINNM";
  case ARMISD::BFI: return "ARMISD::BFI";
  case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
  case ARMISD::VBSL: return "ARMISD::VBSL";
  case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
  case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
  }
}

EVT ARMTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector()) return getPointerTy();
  return VT.changeVectorElementTypeToInteger();
}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return ARM::createFastISel(funcInfo, libInfo);
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
//                               Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE: return ARMCC::NE;
  case ISD::SETEQ: return ARMCC::EQ;
  case ISD::SETGT: return ARMCC::GT;
  case ISD::SETGE: return ARMCC::GE;
  case ISD::SETLT: return ARMCC::LT;
  case ISD::SETLE: return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
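/// After a VFP compare the FPSCR flags are transferred to CPSR (FMSTAT), and
/// some IEEE predicates (e.g. ONE, UEQ) have no single ARM condition there,
/// so a second condition is returned in CondCode2 (ARMCC::AL when unused) and
/// callers emit a second predicated instruction for it.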
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// getEffectiveCallingConv - Get the effective calling convention, taking into
/// account presence of floating point hardware and calling convention
/// limitations, such as support for variadic functions.
CallingConv::ID
ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
                                           bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::ARM_AAPCS:
  case CallingConv::ARM_APCS:
  case CallingConv::GHC:
    return CC;
  case CallingConv::ARM_AAPCS_VFP:
    return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
  case CallingConv::C:
    if (!Subtarget->isAAPCS_ABI())
      return CallingConv::ARM_APCS;
    else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() &&
             getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
             !isVarArg)
      return CallingConv::ARM_AAPCS_VFP;
    else
      return CallingConv::ARM_AAPCS;
  case CallingConv::Fast:
    if (!Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
        return CallingConv::Fast;
      return CallingConv::ARM_APCS;
    } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
      return CallingConv::ARM_AAPCS_VFP;
    else
      return CallingConv::ARM_AAPCS;
  }
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (getEffectiveCallingConv(CC, isVarArg)) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::Fast:
    return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
  case CallingConv::GHC:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   SDLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals,
                                   bool isThisReturn, SDValue ThisVal) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Pass 'this' value directly from the argument to return value, to avoid
    // reg unit interference.
    if (i == 0 && isThisReturn) {
      assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
             "unexpected return calling convention register assignment");
      InVals.push_back(ThisVal);
      continue;
    }

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      if (!Subtarget->isLittle())
        std::swap (Lo, Hi);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        if (!Subtarget->isLittle())
          std::swap (Lo, Hi);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
1335 SDValue
1336 ARMTargetLowering::LowerMemOpCallTo(SDValue Chain,
1337                                     SDValue StackPtr, SDValue Arg,
1338                                     SDLoc dl, SelectionDAG &DAG,
1339                                     const CCValAssign &VA,
1340                                     ISD::ArgFlagsTy Flags) const {
1341   unsigned LocMemOffset = VA.getLocMemOffset();
1342   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
1343   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
1344   return DAG.getStore(Chain, dl, Arg, PtrOff,
1345                       MachinePointerInfo::getStack(LocMemOffset),
1346                       false, false, 0);
1347 }
1348
1349 void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
1350                                          SDValue Chain, SDValue &Arg,
1351                                          RegsToPassVector &RegsToPass,
1352                                          CCValAssign &VA, CCValAssign &NextVA,
1353                                          SDValue &StackPtr,
1354                                          SmallVectorImpl<SDValue> &MemOpChains,
1355                                          ISD::ArgFlagsTy Flags) const {
1356
1357   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
1358                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
1359   unsigned id = Subtarget->isLittle() ? 0 : 1;
1360   RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
1361
1362   if (NextVA.isRegLoc())
1363     RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
1364   else {
1365     assert(NextVA.isMemLoc());
1366     if (!StackPtr.getNode())
1367       StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
1368
1369     MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
1370                                            dl, DAG, NextVA,
1371                                            Flags));
1372   }
1373 }
1374
1375 /// LowerCall - Lower a call into a callseq_start <-
1376 /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
1377 /// nodes.
1378 SDValue
1379 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1380                              SmallVectorImpl<SDValue> &InVals) const {
1381   SelectionDAG &DAG = CLI.DAG;
1382   SDLoc &dl = CLI.DL;
1383   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1384   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1385   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1386   SDValue Chain = CLI.Chain;
1387   SDValue Callee = CLI.Callee;
1388   bool &isTailCall = CLI.IsTailCall;
1389   CallingConv::ID CallConv = CLI.CallConv;
1390   bool doesNotRet = CLI.DoesNotReturn;
1391   bool isVarArg = CLI.IsVarArg;
1392
1393   MachineFunction &MF = DAG.getMachineFunction();
1394   bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
1395   bool isThisReturn = false;
1396   bool isSibCall = false;
1397
1398   // Disable tail calls if they're not supported.
1399   if (!Subtarget->supportsTailCall() || MF.getTarget().Options.DisableTailCalls)
1400     isTailCall = false;
1401
1402   if (isTailCall) {
1403     // Check if it's really possible to do a tail call.
1404     isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1405                     isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
1406                     Outs, OutVals, Ins, DAG);
1407     if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
1408       report_fatal_error("failed to perform tail call elimination on a call "
1409                          "site marked musttail");
1410     // We don't support GuaranteedTailCallOpt for ARM, only automatically
1411     // detected sibcalls.
1412     if (isTailCall) {
1413       ++NumTailCalls;
1414       isSibCall = true;
1415     }
1416   }
1417
1418   // Analyze operands of the call, assigning locations to each operand.
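  // Illustrative sketch (not from the original source): for a call such as
  //   call void @f(i32 %a, i32 %b, double %d)
  // under soft-float AAPCS, AnalyzeCallOperands below would typically assign
  // %a to r0, %b to r1, and the two i32 halves of %d to r2/r3; anything that
  // does not fit in r0-r3 is given a stack location instead.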
1419 SmallVector<CCValAssign, 16> ArgLocs; 1420 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1421 getTargetMachine(), ArgLocs, *DAG.getContext(), Call); 1422 CCInfo.AnalyzeCallOperands(Outs, 1423 CCAssignFnForNode(CallConv, /* Return*/ false, 1424 isVarArg)); 1425 1426 // Get a count of how many bytes are to be pushed on the stack. 1427 unsigned NumBytes = CCInfo.getNextStackOffset(); 1428 1429 // For tail calls, memory operands are available in our caller's stack. 1430 if (isSibCall) 1431 NumBytes = 0; 1432 1433 // Adjust the stack pointer for the new arguments... 1434 // These operations are automatically eliminated by the prolog/epilog pass 1435 if (!isSibCall) 1436 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true), 1437 dl); 1438 1439 SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); 1440 1441 RegsToPassVector RegsToPass; 1442 SmallVector<SDValue, 8> MemOpChains; 1443 1444 // Walk the register/memloc assignments, inserting copies/loads. In the case 1445 // of tail call optimization, arguments are handled later. 1446 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1447 i != e; 1448 ++i, ++realArgIdx) { 1449 CCValAssign &VA = ArgLocs[i]; 1450 SDValue Arg = OutVals[realArgIdx]; 1451 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1452 bool isByVal = Flags.isByVal(); 1453 1454 // Promote the value if needed. 1455 switch (VA.getLocInfo()) { 1456 default: llvm_unreachable("Unknown loc info!"); 1457 case CCValAssign::Full: break; 1458 case CCValAssign::SExt: 1459 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 1460 break; 1461 case CCValAssign::ZExt: 1462 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 1463 break; 1464 case CCValAssign::AExt: 1465 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 1466 break; 1467 case CCValAssign::BCvt: 1468 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1469 break; 1470 } 1471 1472 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces 1473 if (VA.needsCustom()) { 1474 if (VA.getLocVT() == MVT::v2f64) { 1475 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1476 DAG.getConstant(0, MVT::i32)); 1477 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1478 DAG.getConstant(1, MVT::i32)); 1479 1480 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, 1481 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1482 1483 VA = ArgLocs[++i]; // skip ahead to next loc 1484 if (VA.isRegLoc()) { 1485 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, 1486 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1487 } else { 1488 assert(VA.isMemLoc()); 1489 1490 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1, 1491 dl, DAG, VA, Flags)); 1492 } 1493 } else { 1494 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], 1495 StackPtr, MemOpChains, Flags); 1496 } 1497 } else if (VA.isRegLoc()) { 1498 if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) { 1499 assert(VA.getLocVT() == MVT::i32 && 1500 "unexpected calling convention register assignment"); 1501 assert(!Ins.empty() && Ins[0].VT == MVT::i32 && 1502 "unexpected use of 'returned'"); 1503 isThisReturn = true; 1504 } 1505 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1506 } else if (isByVal) { 1507 assert(VA.isMemLoc()); 1508 unsigned offset = 0; 1509 1510 // True if this byval aggregate will be split between registers 1511 // and memory. 
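      // For example (illustrative numbers only): with r2 and r3 still free, a
      // 20-byte byval struct gets its first 8 bytes loaded into r2/r3 by the
      // loop below, leaving "offset" == 2, so the remaining 12 bytes are
      // copied to the stack by the COPY_STRUCT_BYVAL node further down.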
1512       unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
1513       unsigned CurByValIdx = CCInfo.getInRegsParamsProceed();
1514
1515       if (CurByValIdx < ByValArgsCount) {
1516
1517         unsigned RegBegin, RegEnd;
1518         CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
1519
1520         EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
1521         unsigned int i, j;
1522         for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
1523           SDValue Const = DAG.getConstant(4*i, MVT::i32);
1524           SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
1525           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
1526                                      MachinePointerInfo(),
1527                                      false, false, false,
1528                                      DAG.InferPtrAlignment(AddArg));
1529           MemOpChains.push_back(Load.getValue(1));
1530           RegsToPass.push_back(std::make_pair(j, Load));
1531         }
1532
1533         // If the parameter size exceeds the register area, the "offset" value
1534         // helps us calculate the stack slot for the remaining part properly.
1535         offset = RegEnd - RegBegin;
1536
1537         CCInfo.nextInRegsParam();
1538       }
1539
1540       if (Flags.getByValSize() > 4*offset) {
1541         unsigned LocMemOffset = VA.getLocMemOffset();
1542         SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
1543         SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
1544                                   StkPtrOff);
1545         SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
1546         SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
1547         SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
1548                                            MVT::i32);
1549         SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), MVT::i32);
1550
1551         SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
1552         SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
1553         MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
1554                                           Ops));
1555       }
1556     } else if (!isSibCall) {
1557       assert(VA.isMemLoc());
1558
1559       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
1560                                              dl, DAG, VA, Flags));
1561     }
1562   }
1563
1564   if (!MemOpChains.empty())
1565     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1566
1567   // Build a sequence of copy-to-reg nodes chained together with token chain
1568   // and flag operands which copy the outgoing args into the appropriate regs.
1569   SDValue InFlag;
1570   // Tail call byval lowering might overwrite argument registers so in case of
1571   // tail call optimization the copies to registers are lowered later.
1572   if (!isTailCall)
1573     for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1574       Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1575                                RegsToPass[i].second, InFlag);
1576       InFlag = Chain.getValue(1);
1577     }
1578
1579   // For tail calls lower the arguments to the 'real' stack slot.
1580   if (isTailCall) {
1581     // Force all the incoming stack arguments to be loaded from the stack
1582     // before any new outgoing arguments are stored to the stack, because the
1583     // outgoing stack slots may alias the incoming argument stack slots, and
1584     // the alias isn't otherwise explicit. This is slightly more conservative
1585     // than necessary, because it means that each store effectively depends
1586     // on every argument instead of just those arguments it would clobber.
1587
1588     // Do not flag preceding copytoreg stuff together with the following stuff.
1589 InFlag = SDValue(); 1590 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1591 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1592 RegsToPass[i].second, InFlag); 1593 InFlag = Chain.getValue(1); 1594 } 1595 InFlag = SDValue(); 1596 } 1597 1598 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1599 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1600 // node so that legalize doesn't hack it. 1601 bool isDirect = false; 1602 bool isARMFunc = false; 1603 bool isLocalARMFunc = false; 1604 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1605 1606 if (EnableARMLongCalls) { 1607 assert((Subtarget->isTargetWindows() || 1608 getTargetMachine().getRelocationModel() == Reloc::Static) && 1609 "long-calls with non-static relocation model!"); 1610 // Handle a global address or an external symbol. If it's not one of 1611 // those, the target's already in a register, so we don't need to do 1612 // anything extra. 1613 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1614 const GlobalValue *GV = G->getGlobal(); 1615 // Create a constant pool entry for the callee address 1616 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1617 ARMConstantPoolValue *CPV = 1618 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); 1619 1620 // Get the address of the callee into a register 1621 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1622 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1623 Callee = DAG.getLoad(getPointerTy(), dl, 1624 DAG.getEntryNode(), CPAddr, 1625 MachinePointerInfo::getConstantPool(), 1626 false, false, false, 0); 1627 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1628 const char *Sym = S->getSymbol(); 1629 1630 // Create a constant pool entry for the callee address 1631 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1632 ARMConstantPoolValue *CPV = 1633 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1634 ARMPCLabelIndex, 0); 1635 // Get the address of the callee into a register 1636 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1637 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1638 Callee = DAG.getLoad(getPointerTy(), dl, 1639 DAG.getEntryNode(), CPAddr, 1640 MachinePointerInfo::getConstantPool(), 1641 false, false, false, 0); 1642 } 1643 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1644 const GlobalValue *GV = G->getGlobal(); 1645 isDirect = true; 1646 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1647 bool isStub = (isExt && Subtarget->isTargetMachO()) && 1648 getTargetMachine().getRelocationModel() != Reloc::Static; 1649 isARMFunc = !Subtarget->isThumb() || isStub; 1650 // ARM call to a local ARM function is predicable. 1651 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1652 // tBX takes a register source operand. 1653 if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1654 assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?"); 1655 Callee = DAG.getNode(ARMISD::WrapperPIC, dl, getPointerTy(), 1656 DAG.getTargetGlobalAddress(GV, dl, getPointerTy())); 1657 } else if (Subtarget->isTargetCOFF()) { 1658 assert(Subtarget->isTargetWindows() && 1659 "Windows is the only supported COFF target"); 1660 unsigned TargetFlags = GV->hasDLLImportStorageClass() 1661 ? 
ARMII::MO_DLLIMPORT 1662 : ARMII::MO_NO_FLAG; 1663 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), /*Offset=*/0, 1664 TargetFlags); 1665 if (GV->hasDLLImportStorageClass()) 1666 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), 1667 DAG.getNode(ARMISD::Wrapper, dl, getPointerTy(), 1668 Callee), MachinePointerInfo::getGOT(), 1669 false, false, false, 0); 1670 } else { 1671 // On ELF targets for PIC code, direct calls should go through the PLT 1672 unsigned OpFlags = 0; 1673 if (Subtarget->isTargetELF() && 1674 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1675 OpFlags = ARMII::MO_PLT; 1676 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1677 } 1678 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1679 isDirect = true; 1680 bool isStub = Subtarget->isTargetMachO() && 1681 getTargetMachine().getRelocationModel() != Reloc::Static; 1682 isARMFunc = !Subtarget->isThumb() || isStub; 1683 // tBX takes a register source operand. 1684 const char *Sym = S->getSymbol(); 1685 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1686 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1687 ARMConstantPoolValue *CPV = 1688 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1689 ARMPCLabelIndex, 4); 1690 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1691 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1692 Callee = DAG.getLoad(getPointerTy(), dl, 1693 DAG.getEntryNode(), CPAddr, 1694 MachinePointerInfo::getConstantPool(), 1695 false, false, false, 0); 1696 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1697 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1698 getPointerTy(), Callee, PICLabel); 1699 } else { 1700 unsigned OpFlags = 0; 1701 // On ELF targets for PIC code, direct calls should go through the PLT 1702 if (Subtarget->isTargetELF() && 1703 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1704 OpFlags = ARMII::MO_PLT; 1705 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1706 } 1707 } 1708 1709 // FIXME: handle tail calls differently. 1710 unsigned CallOpc; 1711 bool HasMinSizeAttr = MF.getFunction()->getAttributes().hasAttribute( 1712 AttributeSet::FunctionIndex, Attribute::MinSize); 1713 if (Subtarget->isThumb()) { 1714 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1715 CallOpc = ARMISD::CALL_NOLINK; 1716 else 1717 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1718 } else { 1719 if (!isDirect && !Subtarget->hasV5TOps()) 1720 CallOpc = ARMISD::CALL_NOLINK; 1721 else if (doesNotRet && isDirect && Subtarget->hasRAS() && 1722 // Emit regular call when code size is the priority 1723 !HasMinSizeAttr) 1724 // "mov lr, pc; b _foo" to avoid confusing the RSP 1725 CallOpc = ARMISD::CALL_NOLINK; 1726 else 1727 CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; 1728 } 1729 1730 std::vector<SDValue> Ops; 1731 Ops.push_back(Chain); 1732 Ops.push_back(Callee); 1733 1734 // Add argument registers to the end of the list so that they are known live 1735 // into the call. 1736 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 1737 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 1738 RegsToPass[i].second.getValueType())); 1739 1740 // Add a register mask operand representing the call-preserved registers. 
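  // Roughly: under AAPCS the call-preserved mask covers r4-r11 and d8-d15,
  // telling the register allocator that only the volatile registers
  // (r0-r3, r12, lr and d0-d7, d16-d31) can be clobbered by the call. The
  // R0-preserving variant used below additionally keeps r0 live across the
  // call for 'returned' ('this') arguments.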
1741   if (!isTailCall) {
1742     const uint32_t *Mask;
1743     const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
1744     const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo*>(TRI);
1745     if (isThisReturn) {
1746       // For 'this' returns, use the R0-preserving mask if applicable
1747       Mask = ARI->getThisReturnPreservedMask(CallConv);
1748       if (!Mask) {
1749         // Set isThisReturn to false if the calling convention is not one that
1750         // allows 'returned' to be modeled in this way, so LowerCallResult does
1751         // not try to pass 'this' straight through
1752         isThisReturn = false;
1753         Mask = ARI->getCallPreservedMask(CallConv);
1754       }
1755     } else
1756       Mask = ARI->getCallPreservedMask(CallConv);
1757
1758     assert(Mask && "Missing call preserved mask for calling convention");
1759     Ops.push_back(DAG.getRegisterMask(Mask));
1760   }
1761
1762   if (InFlag.getNode())
1763     Ops.push_back(InFlag);
1764
1765   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1766   if (isTailCall)
1767     return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
1768
1769   // Returns a chain and a flag for retval copy to use.
1770   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
1771   InFlag = Chain.getValue(1);
1772
1773   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1774                              DAG.getIntPtrConstant(0, true), InFlag, dl);
1775   if (!Ins.empty())
1776     InFlag = Chain.getValue(1);
1777
1778   // Handle result values, copying them out of physregs into vregs that we
1779   // return.
1780   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
1781                          InVals, isThisReturn,
1782                          isThisReturn ? OutVals[0] : SDValue());
1783 }
1784
1785 /// HandleByVal - Every parameter *after* a byval parameter is passed
1786 /// on the stack. Remember the next parameter register to allocate,
1787 /// and then confiscate the rest of the parameter registers to ensure
1788 /// this.
1789 void
1790 ARMTargetLowering::HandleByVal(
1791     CCState *State, unsigned &size, unsigned Align) const {
1792   unsigned reg = State->AllocateReg(GPRArgRegs, 4);
1793   assert((State->getCallOrPrologue() == Prologue ||
1794           State->getCallOrPrologue() == Call) &&
1795          "unhandled ParmContext");
1796
1797   if ((ARM::R0 <= reg) && (reg <= ARM::R3)) {
1798     if (Subtarget->isAAPCS_ABI() && Align > 4) {
1799       unsigned AlignInRegs = Align / 4;
1800       unsigned Waste = (ARM::R4 - reg) % AlignInRegs;
1801       for (unsigned i = 0; i < Waste; ++i)
1802         reg = State->AllocateReg(GPRArgRegs, 4);
1803     }
1804     if (reg != 0) {
1805       unsigned excess = 4 * (ARM::R4 - reg);
1806
1807       // Special case when NSAA != SP and the parameter size is greater than
1808       // the size of all remaining GPR regs. In that case we can't split the
1809       // parameter; we must send it to the stack. We also must set NCRN to R4,
1810       // so as to waste all remaining registers.
1811       const unsigned NSAAOffset = State->getNextStackOffset();
1812       if (Subtarget->isAAPCS_ABI() && NSAAOffset != 0 && size > excess) {
1813         while (State->AllocateReg(GPRArgRegs, 4))
1814           ;
1815         return;
1816       }
1817
1818       // The first register for a byval parameter is the first register that
1819       // wasn't allocated before this method call, i.e. "reg".
1820       // If the parameter is small enough to be saved in the range [reg, r4),
1821       // the end (first-after-last) register is reg + param-size-in-regs;
1822       // otherwise the parameter is split between registers and the stack,
1823       // and the end register is r4.
1824       unsigned ByValRegBegin = reg;
1825       unsigned ByValRegEnd = (size < excess) ?
                               reg + size/4 : (unsigned)ARM::R4;
1826       State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
1827       // Note: the first register was already allocated at the start of this
1828       // method; here we allocate the remaining registers we need.
1829       for (unsigned i = reg+1; i != ByValRegEnd; ++i)
1830         State->AllocateReg(GPRArgRegs, 4);
1831       // A byval parameter that is split between registers and memory needs its
1832       // size truncated here.
1833       // In the case where the entire structure fits in registers, we set the
1834       // size in memory to zero.
1835       if (size < excess)
1836         size = 0;
1837       else
1838         size -= excess;
1839     }
1840   }
1841 }
1842
1843 /// MatchingStackOffset - Return true if the given stack call argument is
1844 /// already available in the same position (relatively) of the caller's
1845 /// incoming argument stack.
1846 static
1847 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
1848                          MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
1849                          const TargetInstrInfo *TII) {
1850   unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
1851   int FI = INT_MAX;
1852   if (Arg.getOpcode() == ISD::CopyFromReg) {
1853     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
1854     if (!TargetRegisterInfo::isVirtualRegister(VR))
1855       return false;
1856     MachineInstr *Def = MRI->getVRegDef(VR);
1857     if (!Def)
1858       return false;
1859     if (!Flags.isByVal()) {
1860       if (!TII->isLoadFromStackSlot(Def, FI))
1861         return false;
1862     } else {
1863       return false;
1864     }
1865   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
1866     if (Flags.isByVal())
1867       // ByVal argument is passed in as a pointer but it's now being
1868       // dereferenced. e.g.
1869       // define @foo(%struct.X* %A) {
1870       //   tail call @bar(%struct.X* byval %A)
1871       // }
1872       return false;
1873     SDValue Ptr = Ld->getBasePtr();
1874     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
1875     if (!FINode)
1876       return false;
1877     FI = FINode->getIndex();
1878   } else
1879     return false;
1880
1881   assert(FI != INT_MAX);
1882   if (!MFI->isFixedObjectIndex(FI))
1883     return false;
1884   return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
1885 }
1886
1887 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
1888 /// for tail call optimization. Targets which want to do tail call
1889 /// optimization should implement this function.
1890 bool
1891 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
1892                      CallingConv::ID CalleeCC,
1893                      bool isVarArg,
1894                      bool isCalleeStructRet,
1895                      bool isCallerStructRet,
1896                      const SmallVectorImpl<ISD::OutputArg> &Outs,
1897                      const SmallVectorImpl<SDValue> &OutVals,
1898                      const SmallVectorImpl<ISD::InputArg> &Ins,
1899                      SelectionDAG& DAG) const {
1900   const Function *CallerF = DAG.getMachineFunction().getFunction();
1901   CallingConv::ID CallerCC = CallerF->getCallingConv();
1902   bool CCMatch = CallerCC == CalleeCC;
1903
1904   // Look for obvious safe cases to perform tail call optimization that do not
1905   // require ABI changes. This is what gcc calls sibcall.
1906
1907   // Do not sibcall optimize vararg calls unless the call site is not passing
1908   // any arguments.
1909   if (isVarArg && !Outs.empty())
1910     return false;
1911
1912   // Exception-handling functions need a special set of instructions to indicate
1913   // a return to the hardware. Tail-calling another function would probably
1914   // break this.
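  // (For example, an "interrupt" handler returns with "subs pc, lr, #4"
  // rather than a plain "bx lr" -- see LowerInterruptReturn below -- so an
  // ordinary tail-call return sequence would not restore the exception
  // state.)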
1915   if (CallerF->hasFnAttribute("interrupt"))
1916     return false;
1917
1918   // Also avoid sibcall optimization if either caller or callee uses struct
1919   // return semantics.
1920   if (isCalleeStructRet || isCallerStructRet)
1921     return false;
1922
1923   // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo::
1924   // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as
1925   // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation
1926   // support in the assembler and linker to be used. This would need to be
1927   // fixed to fully support tail calls in Thumb1.
1928   //
1929   // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take
1930   // LR. This means if we need to reload LR, it takes an extra instruction,
1931   // which outweighs the value of the tail call; but here we don't know yet
1932   // whether LR is going to be used. Probably the right approach is to
1933   // generate the tail call here and turn it back into CALL/RET in
1934   // emitEpilogue if LR is used.
1935
1936   // Thumb1 PIC calls to external symbols use BX, so they can be tail calls,
1937   // but we need to make sure there are enough registers; the only valid
1938   // registers are the 4 used for parameters. We don't currently do this
1939   // case.
1940   if (Subtarget->isThumb1Only())
1941     return false;
1942
1943   // If the calling conventions do not match, then we'd better make sure the
1944   // results are returned in the same way as what the caller expects.
1945   if (!CCMatch) {
1946     SmallVector<CCValAssign, 16> RVLocs1;
1947     ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
1948                        getTargetMachine(), RVLocs1, *DAG.getContext(), Call);
1949     CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));
1950
1951     SmallVector<CCValAssign, 16> RVLocs2;
1952     ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
1953                        getTargetMachine(), RVLocs2, *DAG.getContext(), Call);
1954     CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));
1955
1956     if (RVLocs1.size() != RVLocs2.size())
1957       return false;
1958     for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
1959       if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
1960         return false;
1961       if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
1962         return false;
1963       if (RVLocs1[i].isRegLoc()) {
1964         if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
1965           return false;
1966       } else {
1967         if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
1968           return false;
1969       }
1970     }
1971   }
1972
1973   // If the caller's vararg or byval argument has been split between registers
1974   // and the stack, do not perform a tail call, since part of the argument is
1975   // in the caller's local frame.
1976   const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction().
1977                                       getInfo<ARMFunctionInfo>();
1978   if (AFI_Caller->getArgRegsSaveSize())
1979     return false;
1980
1981   // If the callee takes no arguments then go on to check the results of the
1982   // call.
1983   if (!Outs.empty()) {
1984     // Check if stack adjustment is needed. For now, do not do this if any
1985     // argument is passed on the stack.
1986 SmallVector<CCValAssign, 16> ArgLocs; 1987 ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 1988 getTargetMachine(), ArgLocs, *DAG.getContext(), Call); 1989 CCInfo.AnalyzeCallOperands(Outs, 1990 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1991 if (CCInfo.getNextStackOffset()) { 1992 MachineFunction &MF = DAG.getMachineFunction(); 1993 1994 // Check if the arguments are already laid out in the right way as 1995 // the caller's fixed stack objects. 1996 MachineFrameInfo *MFI = MF.getFrameInfo(); 1997 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1998 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 1999 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 2000 i != e; 2001 ++i, ++realArgIdx) { 2002 CCValAssign &VA = ArgLocs[i]; 2003 EVT RegVT = VA.getLocVT(); 2004 SDValue Arg = OutVals[realArgIdx]; 2005 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 2006 if (VA.getLocInfo() == CCValAssign::Indirect) 2007 return false; 2008 if (VA.needsCustom()) { 2009 // f64 and vector types are split into multiple registers or 2010 // register/stack-slot combinations. The types will not match 2011 // the registers; give up on memory f64 refs until we figure 2012 // out what to do about this. 2013 if (!VA.isRegLoc()) 2014 return false; 2015 if (!ArgLocs[++i].isRegLoc()) 2016 return false; 2017 if (RegVT == MVT::v2f64) { 2018 if (!ArgLocs[++i].isRegLoc()) 2019 return false; 2020 if (!ArgLocs[++i].isRegLoc()) 2021 return false; 2022 } 2023 } else if (!VA.isRegLoc()) { 2024 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 2025 MFI, MRI, TII)) 2026 return false; 2027 } 2028 } 2029 } 2030 } 2031 2032 return true; 2033 } 2034 2035 bool 2036 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 2037 MachineFunction &MF, bool isVarArg, 2038 const SmallVectorImpl<ISD::OutputArg> &Outs, 2039 LLVMContext &Context) const { 2040 SmallVector<CCValAssign, 16> RVLocs; 2041 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context); 2042 return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true, 2043 isVarArg)); 2044 } 2045 2046 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps, 2047 SDLoc DL, SelectionDAG &DAG) { 2048 const MachineFunction &MF = DAG.getMachineFunction(); 2049 const Function *F = MF.getFunction(); 2050 2051 StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString(); 2052 2053 // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset 2054 // version of the "preferred return address". These offsets affect the return 2055 // instruction if this is a return from PL1 without hypervisor extensions. 2056 // IRQ/FIQ: +4 "subs pc, lr, #4" 2057 // SWI: 0 "subs pc, lr, #0" 2058 // ABORT: +4 "subs pc, lr, #4" 2059 // UNDEF: +4/+2 "subs pc, lr, #0" 2060 // UNDEF varies depending on where the exception came from ARM or Thumb 2061 // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0. 2062 2063 int64_t LROffset; 2064 if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" || 2065 IntKind == "ABORT") 2066 LROffset = 4; 2067 else if (IntKind == "SWI" || IntKind == "UNDEF") 2068 LROffset = 0; 2069 else 2070 report_fatal_error("Unsupported interrupt attribute. 
If present, value " 2071 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF"); 2072 2073 RetOps.insert(RetOps.begin() + 1, DAG.getConstant(LROffset, MVT::i32, false)); 2074 2075 return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps); 2076 } 2077 2078 SDValue 2079 ARMTargetLowering::LowerReturn(SDValue Chain, 2080 CallingConv::ID CallConv, bool isVarArg, 2081 const SmallVectorImpl<ISD::OutputArg> &Outs, 2082 const SmallVectorImpl<SDValue> &OutVals, 2083 SDLoc dl, SelectionDAG &DAG) const { 2084 2085 // CCValAssign - represent the assignment of the return value to a location. 2086 SmallVector<CCValAssign, 16> RVLocs; 2087 2088 // CCState - Info about the registers and stack slots. 2089 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2090 getTargetMachine(), RVLocs, *DAG.getContext(), Call); 2091 2092 // Analyze outgoing return values. 2093 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 2094 isVarArg)); 2095 2096 SDValue Flag; 2097 SmallVector<SDValue, 4> RetOps; 2098 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 2099 bool isLittleEndian = Subtarget->isLittle(); 2100 2101 // Copy the result values into the output registers. 2102 for (unsigned i = 0, realRVLocIdx = 0; 2103 i != RVLocs.size(); 2104 ++i, ++realRVLocIdx) { 2105 CCValAssign &VA = RVLocs[i]; 2106 assert(VA.isRegLoc() && "Can only return in registers!"); 2107 2108 SDValue Arg = OutVals[realRVLocIdx]; 2109 2110 switch (VA.getLocInfo()) { 2111 default: llvm_unreachable("Unknown loc info!"); 2112 case CCValAssign::Full: break; 2113 case CCValAssign::BCvt: 2114 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 2115 break; 2116 } 2117 2118 if (VA.needsCustom()) { 2119 if (VA.getLocVT() == MVT::v2f64) { 2120 // Extract the first half and return it in two registers. 2121 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 2122 DAG.getConstant(0, MVT::i32)); 2123 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 2124 DAG.getVTList(MVT::i32, MVT::i32), Half); 2125 2126 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2127 HalfGPRs.getValue(isLittleEndian ? 0 : 1), 2128 Flag); 2129 Flag = Chain.getValue(1); 2130 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2131 VA = RVLocs[++i]; // skip ahead to next loc 2132 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2133 HalfGPRs.getValue(isLittleEndian ? 1 : 0), 2134 Flag); 2135 Flag = Chain.getValue(1); 2136 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2137 VA = RVLocs[++i]; // skip ahead to next loc 2138 2139 // Extract the 2nd half and fall through to handle it as an f64 value. 2140 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 2141 DAG.getConstant(1, MVT::i32)); 2142 } 2143 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 2144 // available. 2145 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 2146 DAG.getVTList(MVT::i32, MVT::i32), Arg); 2147 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2148 fmrrd.getValue(isLittleEndian ? 0 : 1), 2149 Flag); 2150 Flag = Chain.getValue(1); 2151 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2152 VA = RVLocs[++i]; // skip ahead to next loc 2153 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2154 fmrrd.getValue(isLittleEndian ? 1 : 0), 2155 Flag); 2156 } else 2157 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 2158 2159 // Guarantee that all emitted copies are 2160 // stuck together, avoiding something bad. 
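    // ("Something bad" being the scheduler interleaving unrelated nodes
    // between the CopyToReg nodes: threading the glue value through each
    // copy forces them to be emitted back to back.)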
2161 Flag = Chain.getValue(1); 2162 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2163 } 2164 2165 // Update chain and glue. 2166 RetOps[0] = Chain; 2167 if (Flag.getNode()) 2168 RetOps.push_back(Flag); 2169 2170 // CPUs which aren't M-class use a special sequence to return from 2171 // exceptions (roughly, any instruction setting pc and cpsr simultaneously, 2172 // though we use "subs pc, lr, #N"). 2173 // 2174 // M-class CPUs actually use a normal return sequence with a special 2175 // (hardware-provided) value in LR, so the normal code path works. 2176 if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") && 2177 !Subtarget->isMClass()) { 2178 if (Subtarget->isThumb1Only()) 2179 report_fatal_error("interrupt attribute is not supported in Thumb1"); 2180 return LowerInterruptReturn(RetOps, dl, DAG); 2181 } 2182 2183 return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps); 2184 } 2185 2186 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 2187 if (N->getNumValues() != 1) 2188 return false; 2189 if (!N->hasNUsesOfValue(1, 0)) 2190 return false; 2191 2192 SDValue TCChain = Chain; 2193 SDNode *Copy = *N->use_begin(); 2194 if (Copy->getOpcode() == ISD::CopyToReg) { 2195 // If the copy has a glue operand, we conservatively assume it isn't safe to 2196 // perform a tail call. 2197 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 2198 return false; 2199 TCChain = Copy->getOperand(0); 2200 } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { 2201 SDNode *VMov = Copy; 2202 // f64 returned in a pair of GPRs. 2203 SmallPtrSet<SDNode*, 2> Copies; 2204 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 2205 UI != UE; ++UI) { 2206 if (UI->getOpcode() != ISD::CopyToReg) 2207 return false; 2208 Copies.insert(*UI); 2209 } 2210 if (Copies.size() > 2) 2211 return false; 2212 2213 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 2214 UI != UE; ++UI) { 2215 SDValue UseChain = UI->getOperand(0); 2216 if (Copies.count(UseChain.getNode())) 2217 // Second CopyToReg 2218 Copy = *UI; 2219 else 2220 // First CopyToReg 2221 TCChain = UseChain; 2222 } 2223 } else if (Copy->getOpcode() == ISD::BITCAST) { 2224 // f32 returned in a single GPR. 2225 if (!Copy->hasOneUse()) 2226 return false; 2227 Copy = *Copy->use_begin(); 2228 if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) 2229 return false; 2230 TCChain = Copy->getOperand(0); 2231 } else { 2232 return false; 2233 } 2234 2235 bool HasRet = false; 2236 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 2237 UI != UE; ++UI) { 2238 if (UI->getOpcode() != ARMISD::RET_FLAG && 2239 UI->getOpcode() != ARMISD::INTRET_FLAG) 2240 return false; 2241 HasRet = true; 2242 } 2243 2244 if (!HasRet) 2245 return false; 2246 2247 Chain = TCChain; 2248 return true; 2249 } 2250 2251 bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 2252 if (!Subtarget->supportsTailCall()) 2253 return false; 2254 2255 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls) 2256 return false; 2257 2258 return !Subtarget->isThumb1Only(); 2259 } 2260 2261 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 2262 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 2263 // one of the above mentioned nodes. It has to be wrapped because otherwise 2264 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. 
can only 2265 // be used to form addressing mode. These wrapped nodes will be selected 2266 // into MOVi. 2267 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 2268 EVT PtrVT = Op.getValueType(); 2269 // FIXME there is no actual debug info here 2270 SDLoc dl(Op); 2271 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2272 SDValue Res; 2273 if (CP->isMachineConstantPoolEntry()) 2274 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 2275 CP->getAlignment()); 2276 else 2277 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 2278 CP->getAlignment()); 2279 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 2280 } 2281 2282 unsigned ARMTargetLowering::getJumpTableEncoding() const { 2283 return MachineJumpTableInfo::EK_Inline; 2284 } 2285 2286 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 2287 SelectionDAG &DAG) const { 2288 MachineFunction &MF = DAG.getMachineFunction(); 2289 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2290 unsigned ARMPCLabelIndex = 0; 2291 SDLoc DL(Op); 2292 EVT PtrVT = getPointerTy(); 2293 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 2294 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2295 SDValue CPAddr; 2296 if (RelocM == Reloc::Static) { 2297 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 2298 } else { 2299 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 2300 ARMPCLabelIndex = AFI->createPICLabelUId(); 2301 ARMConstantPoolValue *CPV = 2302 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, 2303 ARMCP::CPBlockAddress, PCAdj); 2304 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2305 } 2306 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 2307 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 2308 MachinePointerInfo::getConstantPool(), 2309 false, false, false, 0); 2310 if (RelocM == Reloc::Static) 2311 return Result; 2312 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2313 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 2314 } 2315 2316 // Lower ISD::GlobalTLSAddress using the "general dynamic" model 2317 SDValue 2318 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 2319 SelectionDAG &DAG) const { 2320 SDLoc dl(GA); 2321 EVT PtrVT = getPointerTy(); 2322 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 2323 MachineFunction &MF = DAG.getMachineFunction(); 2324 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2325 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2326 ARMConstantPoolValue *CPV = 2327 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2328 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 2329 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2330 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 2331 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 2332 MachinePointerInfo::getConstantPool(), 2333 false, false, false, 0); 2334 SDValue Chain = Argument.getValue(1); 2335 2336 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2337 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 2338 2339 // call __tls_get_addr. 2340 ArgListTy Args; 2341 ArgListEntry Entry; 2342 Entry.Node = Argument; 2343 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 2344 Args.push_back(Entry); 2345 2346 // FIXME: is there useful debug info available here? 
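  // A sketch of what this lowers to, assuming the usual ARM/ELF
  // general-dynamic convention: the address of the GOT-resident tls_index
  // pair (module ID, offset) is passed to __tls_get_addr in r0, and the
  // variable's address comes back in r0.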
2347 TargetLowering::CallLoweringInfo CLI(DAG); 2348 CLI.setDebugLoc(dl).setChain(Chain) 2349 .setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()), 2350 DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args), 2351 0); 2352 2353 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2354 return CallResult.first; 2355 } 2356 2357 // Lower ISD::GlobalTLSAddress using the "initial exec" or 2358 // "local exec" model. 2359 SDValue 2360 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 2361 SelectionDAG &DAG, 2362 TLSModel::Model model) const { 2363 const GlobalValue *GV = GA->getGlobal(); 2364 SDLoc dl(GA); 2365 SDValue Offset; 2366 SDValue Chain = DAG.getEntryNode(); 2367 EVT PtrVT = getPointerTy(); 2368 // Get the Thread Pointer 2369 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2370 2371 if (model == TLSModel::InitialExec) { 2372 MachineFunction &MF = DAG.getMachineFunction(); 2373 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2374 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2375 // Initial exec model. 2376 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 2377 ARMConstantPoolValue *CPV = 2378 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2379 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, 2380 true); 2381 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2382 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2383 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2384 MachinePointerInfo::getConstantPool(), 2385 false, false, false, 0); 2386 Chain = Offset.getValue(1); 2387 2388 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2389 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 2390 2391 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2392 MachinePointerInfo::getConstantPool(), 2393 false, false, false, 0); 2394 } else { 2395 // local exec model 2396 assert(model == TLSModel::LocalExec); 2397 ARMConstantPoolValue *CPV = 2398 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); 2399 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2400 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2401 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2402 MachinePointerInfo::getConstantPool(), 2403 false, false, false, 0); 2404 } 2405 2406 // The address of the thread local variable is the add of the thread 2407 // pointer with the offset of the variable. 
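  // e.g. for the local exec model the result is ThreadPointer + TPOFF(GV);
  // for initial exec it is ThreadPointer plus the offset loaded from the GOT
  // above.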
2408 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2409 } 2410 2411 SDValue 2412 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2413 // TODO: implement the "local dynamic" model 2414 assert(Subtarget->isTargetELF() && 2415 "TLS not implemented for non-ELF targets"); 2416 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2417 2418 TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal()); 2419 2420 switch (model) { 2421 case TLSModel::GeneralDynamic: 2422 case TLSModel::LocalDynamic: 2423 return LowerToTLSGeneralDynamicModel(GA, DAG); 2424 case TLSModel::InitialExec: 2425 case TLSModel::LocalExec: 2426 return LowerToTLSExecModels(GA, DAG, model); 2427 } 2428 llvm_unreachable("bogus TLS model"); 2429 } 2430 2431 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 2432 SelectionDAG &DAG) const { 2433 EVT PtrVT = getPointerTy(); 2434 SDLoc dl(Op); 2435 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2436 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2437 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2438 ARMConstantPoolValue *CPV = 2439 ARMConstantPoolConstant::Create(GV, 2440 UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2441 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2442 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2443 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 2444 CPAddr, 2445 MachinePointerInfo::getConstantPool(), 2446 false, false, false, 0); 2447 SDValue Chain = Result.getValue(1); 2448 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2449 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 2450 if (!UseGOTOFF) 2451 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2452 MachinePointerInfo::getGOT(), 2453 false, false, false, 0); 2454 return Result; 2455 } 2456 2457 // If we have T2 ops, we can materialize the address directly via movt/movw 2458 // pair. This is always cheaper. 2459 if (Subtarget->useMovt(DAG.getMachineFunction())) { 2460 ++NumMovwMovt; 2461 // FIXME: Once remat is capable of dealing with instructions with register 2462 // operands, expand this into two nodes. 2463 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2464 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2465 } else { 2466 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2467 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2468 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2469 MachinePointerInfo::getConstantPool(), 2470 false, false, false, 0); 2471 } 2472 } 2473 2474 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2475 SelectionDAG &DAG) const { 2476 EVT PtrVT = getPointerTy(); 2477 SDLoc dl(Op); 2478 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2479 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2480 2481 if (Subtarget->useMovt(DAG.getMachineFunction())) 2482 ++NumMovwMovt; 2483 2484 // FIXME: Once remat is capable of dealing with instructions with register 2485 // operands, expand this into multiple nodes 2486 unsigned Wrapper = 2487 RelocM == Reloc::PIC_ ? 
ARMISD::WrapperPIC : ARMISD::Wrapper; 2488 2489 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY); 2490 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G); 2491 2492 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2493 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2494 MachinePointerInfo::getGOT(), false, false, false, 0); 2495 return Result; 2496 } 2497 2498 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op, 2499 SelectionDAG &DAG) const { 2500 assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported"); 2501 assert(Subtarget->useMovt(DAG.getMachineFunction()) && 2502 "Windows on ARM expects to use movw/movt"); 2503 2504 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2505 const ARMII::TOF TargetFlags = 2506 (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG); 2507 EVT PtrVT = getPointerTy(); 2508 SDValue Result; 2509 SDLoc DL(Op); 2510 2511 ++NumMovwMovt; 2512 2513 // FIXME: Once remat is capable of dealing with instructions with register 2514 // operands, expand this into two nodes. 2515 Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, 2516 DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0, 2517 TargetFlags)); 2518 if (GV->hasDLLImportStorageClass()) 2519 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, 2520 MachinePointerInfo::getGOT(), false, false, false, 0); 2521 return Result; 2522 } 2523 2524 SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2525 SelectionDAG &DAG) const { 2526 assert(Subtarget->isTargetELF() && 2527 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2528 MachineFunction &MF = DAG.getMachineFunction(); 2529 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2530 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2531 EVT PtrVT = getPointerTy(); 2532 SDLoc dl(Op); 2533 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8; 2534 ARMConstantPoolValue *CPV = 2535 ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_", 2536 ARMPCLabelIndex, PCAdj); 2537 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2538 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2539 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2540 MachinePointerInfo::getConstantPool(), 2541 false, false, false, 0); 2542 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2543 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2544 } 2545 2546 SDValue 2547 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2548 SDLoc dl(Op); 2549 SDValue Val = DAG.getConstant(0, MVT::i32); 2550 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, 2551 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), 2552 Op.getOperand(1), Val); 2553 } 2554 2555 SDValue 2556 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2557 SDLoc dl(Op); 2558 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2559 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2560 } 2561 2562 SDValue 2563 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2564 const ARMSubtarget *Subtarget) const { 2565 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2566 SDLoc dl(Op); 2567 switch (IntNo) { 2568 default: return SDValue(); // Don't custom lower most intrinsics. 
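  // RBIT reverses the bit order of a 32-bit value (bit 0 becomes bit 31, and
  // so on); it maps directly onto the ARMv6T2+ "rbit" instruction.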
2569   case Intrinsic::arm_rbit: {
2570     // Operand 0 is the intrinsic ID; the value to reverse is operand 1.
2571     assert(Op.getOperand(1).getValueType() == MVT::i32 &&
           "RBIT intrinsic must have i32 type!");
2572     return DAG.getNode(ARMISD::RBIT, dl, MVT::i32, Op.getOperand(1));
2573   }
2574   case Intrinsic::arm_thread_pointer: {
2575     EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
2576     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
2577   }
2578   case Intrinsic::eh_sjlj_lsda: {
2579     MachineFunction &MF = DAG.getMachineFunction();
2580     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2581     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2582     EVT PtrVT = getPointerTy();
2583     Reloc::Model RelocM = getTargetMachine().getRelocationModel();
2584     SDValue CPAddr;
2585     unsigned PCAdj = (RelocM != Reloc::PIC_)
2586       ? 0 : (Subtarget->isThumb() ? 4 : 8);
2587     ARMConstantPoolValue *CPV =
2588       ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
2589                                       ARMCP::CPLSDA, PCAdj);
2590     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2591     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2592     SDValue Result =
2593       DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr,
2594                   MachinePointerInfo::getConstantPool(),
2595                   false, false, false, 0);
2596
2597     if (RelocM == Reloc::PIC_) {
2598       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32);
2599       Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
2600     }
2601     return Result;
2602   }
2603   case Intrinsic::arm_neon_vmulls:
2604   case Intrinsic::arm_neon_vmullu: {
2605     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
2606       ? ARMISD::VMULLs : ARMISD::VMULLu;
2607     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
2608                        Op.getOperand(1), Op.getOperand(2));
2609   }
2610   }
2611 }
2612
2613 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
2614                                  const ARMSubtarget *Subtarget) {
2615   // FIXME: handle "fence singlethread" more efficiently.
2616   SDLoc dl(Op);
2617   if (!Subtarget->hasDataBarrier()) {
2618     // Some ARMv6 cpus can support data barriers with an mcr instruction.
2619     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
2620     // here.
2621     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
2622            "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
2623     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
2624                        DAG.getConstant(0, MVT::i32));
2625   }
2626
2627   ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
2628   AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
2629   unsigned Domain = ARM_MB::ISH;
2630   if (Subtarget->isMClass()) {
2631     // Only a full system barrier exists in the M-class architectures.
2632     Domain = ARM_MB::SY;
2633   } else if (Subtarget->isSwift() && Ord == Release) {
2634     // Swift happens to implement ISHST barriers in a way that's compatible with
2635     // Release semantics but weaker than ISH so we'd be fools not to use
2636     // it. Beware: other processors probably don't!
2637     Domain = ARM_MB::ISHST;
2638   }
2639
2640   return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
2641                      DAG.getConstant(Intrinsic::arm_dmb, MVT::i32),
2642                      DAG.getConstant(Domain, MVT::i32));
2643 }
2644
2645 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
2646                              const ARMSubtarget *Subtarget) {
2647   // ARM pre v5TE and Thumb1 do not have preload instructions.
2648   if (!(Subtarget->isThumb2() ||
2649         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
2650     // Just preserve the chain.
2651 return Op.getOperand(0); 2652 2653 SDLoc dl(Op); 2654 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 2655 if (!isRead && 2656 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 2657 // ARMv7 with MP extension has PLDW. 2658 return Op.getOperand(0); 2659 2660 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 2661 if (Subtarget->isThumb()) { 2662 // Invert the bits. 2663 isRead = ~isRead & 1; 2664 isData = ~isData & 1; 2665 } 2666 2667 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2668 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2669 DAG.getConstant(isData, MVT::i32)); 2670 } 2671 2672 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2673 MachineFunction &MF = DAG.getMachineFunction(); 2674 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2675 2676 // vastart just stores the address of the VarArgsFrameIndex slot into the 2677 // memory location argument. 2678 SDLoc dl(Op); 2679 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2680 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2681 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2682 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2683 MachinePointerInfo(SV), false, false, 0); 2684 } 2685 2686 SDValue 2687 ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 2688 SDValue &Root, SelectionDAG &DAG, 2689 SDLoc dl) const { 2690 MachineFunction &MF = DAG.getMachineFunction(); 2691 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2692 2693 const TargetRegisterClass *RC; 2694 if (AFI->isThumb1OnlyFunction()) 2695 RC = &ARM::tGPRRegClass; 2696 else 2697 RC = &ARM::GPRRegClass; 2698 2699 // Transform the arguments stored in physical registers into virtual ones. 2700 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2701 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2702 2703 SDValue ArgValue2; 2704 if (NextVA.isMemLoc()) { 2705 MachineFrameInfo *MFI = MF.getFrameInfo(); 2706 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true); 2707 2708 // Create load node to retrieve arguments from the stack. 2709 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2710 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, 2711 MachinePointerInfo::getFixedStack(FI), 2712 false, false, false, 0); 2713 } else { 2714 Reg = MF.addLiveIn(NextVA.getLocReg(), RC); 2715 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2716 } 2717 if (!Subtarget->isLittle()) 2718 std::swap (ArgValue, ArgValue2); 2719 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2); 2720 } 2721 2722 void 2723 ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF, 2724 unsigned InRegsParamRecordIdx, 2725 unsigned ArgSize, 2726 unsigned &ArgRegsSize, 2727 unsigned &ArgRegsSaveSize) 2728 const { 2729 unsigned NumGPRs; 2730 if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) { 2731 unsigned RBegin, REnd; 2732 CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd); 2733 NumGPRs = REnd - RBegin; 2734 } else { 2735 unsigned int firstUnalloced; 2736 firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs, 2737 sizeof(GPRArgRegs) / 2738 sizeof(GPRArgRegs[0])); 2739 NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0; 2740 } 2741 2742 unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment(); 2743 ArgRegsSize = NumGPRs * 4; 2744 2745 // If parameter is split between stack and GPRs... 
2746   if (NumGPRs && Align > 4 &&
2747       (ArgRegsSize < ArgSize ||
2748        InRegsParamRecordIdx >= CCInfo.getInRegsParamsCount())) {
2749     // Add padding for the part of the parameter recovered from GPRs. For
2750     // example, if Align == 8, its last byte must be at address K*8 - 1.
2751     // We need to do this because the remaining (stack) part of the parameter
2752     // has stack alignment, and we need to "attach" the "GPRs head" to it
2753     // without gaps:
2754     // Stack:
2755     // |---- 8 bytes block ----| |---- 8 bytes block ----| |---- 8 bytes...
2756     // [ [padding] [GPRs head] ] [ Tail passed via stack ....
2757     //
2758     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2759     unsigned Padding =
2760       OffsetToAlignment(ArgRegsSize + AFI->getArgRegsSaveSize(), Align);
2761     ArgRegsSaveSize = ArgRegsSize + Padding;
2762   } else
2763     // We don't need to extend the regs save size for byval parameters if they
2764     // are passed via GPRs only.
2765     ArgRegsSaveSize = ArgRegsSize;
2766 }
2767
2768 // The remaining GPRs hold either the beginning of variable-argument
2769 // data, or the beginning of an aggregate passed by value (usually
2770 // byval). Either way, we allocate stack slots adjacent to the data
2771 // provided by our caller, and store the unallocated registers there.
2772 // If this is a variadic function, the va_list pointer will begin with
2773 // these values; otherwise, this reassembles a (byval) structure that
2774 // was split between registers and memory.
2775 // Return: The frame index the registers were stored into.
2776 int
2777 ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
2778                                   SDLoc dl, SDValue &Chain,
2779                                   const Value *OrigArg,
2780                                   unsigned InRegsParamRecordIdx,
2781                                   unsigned OffsetFromOrigArg,
2782                                   unsigned ArgOffset,
2783                                   unsigned ArgSize,
2784                                   bool ForceMutable,
2785                                   unsigned ByValStoreOffset,
2786                                   unsigned TotalArgRegsSaveSize) const {
2787
2788   // Currently, two use cases are possible:
2789   // Case #1. Non-var-args function, and we meet the first byval parameter.
2790   //          Set up the first unallocated register as the first byval
2791   //          register; eat all remaining registers
2792   //          (these two actions are performed by the HandleByVal method).
2793   //          Then, here, we initialize the stack frame with
2794   //          "store-reg" instructions.
2795   // Case #2. Var-args function that doesn't contain byval parameters.
2796   //          The same: eat all remaining unallocated registers and
2797   //          initialize the stack frame.
2798
2799   MachineFunction &MF = DAG.getMachineFunction();
2800   MachineFrameInfo *MFI = MF.getFrameInfo();
2801   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2802   unsigned firstRegToSaveIndex, lastRegToSaveIndex;
2803   unsigned RBegin, REnd;
2804   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
2805     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
2806     firstRegToSaveIndex = RBegin - ARM::R0;
2807     lastRegToSaveIndex = REnd - ARM::R0;
2808   } else {
2809     firstRegToSaveIndex = CCInfo.getFirstUnallocated
2810       (GPRArgRegs, array_lengthof(GPRArgRegs));
2811     lastRegToSaveIndex = 4;
2812   }
2813
2814   unsigned ArgRegsSize, ArgRegsSaveSize;
2815   computeRegArea(CCInfo, MF, InRegsParamRecordIdx, ArgSize,
2816                  ArgRegsSize, ArgRegsSaveSize);
2817
2818   // Store any by-val regs to their spots on the stack so that they may be
2819   // loaded by dereferencing the result of the formal parameter pointer or va_next.
2820   // Note: once the stack area for byval/varargs registers
2821   // was initialized, it can't be initialized again.
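  // Worked example (illustrative numbers): with Align == 8, one GPR left to
  // save and no save area allocated yet, computeRegArea above yields
  // ArgRegsSize == 4 and ArgRegsSaveSize == 8, so the code below records
  // Padding == 4 and the stack part of the argument stays 8-byte aligned.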
  if (ArgRegsSaveSize) {
    unsigned Padding = ArgRegsSaveSize - ArgRegsSize;

    if (Padding) {
      assert(AFI->getStoredByValParamsPadding() == 0 &&
             "The only parameter may be padded.");
      AFI->setStoredByValParamsPadding(Padding);
    }

    int FrameIndex = MFI->CreateFixedObject(ArgRegsSaveSize,
                                            Padding +
                                              ByValStoreOffset -
                                              (int64_t)TotalArgRegsSaveSize,
                                            false);
    SDValue FIN = DAG.getFrameIndex(FrameIndex, getPointerTy());
    if (Padding) {
      MFI->CreateFixedObject(Padding,
                             ArgOffset + ByValStoreOffset -
                               (int64_t)ArgRegsSaveSize,
                             false);
    }

    SmallVector<SDValue, 4> MemOps;
    for (unsigned i = 0; firstRegToSaveIndex < lastRegToSaveIndex;
         ++firstRegToSaveIndex, ++i) {
      const TargetRegisterClass *RC;
      if (AFI->isThumb1OnlyFunction())
        RC = &ARM::tGPRRegClass;
      else
        RC = &ARM::GPRRegClass;

      unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN,
                       MachinePointerInfo(OrigArg, OffsetFromOrigArg + 4*i),
                       false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
                        DAG.getConstant(4, getPointerTy()));
    }

    AFI->setArgRegsSaveSize(ArgRegsSaveSize + AFI->getArgRegsSaveSize());

    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
    return FrameIndex;
  } else {
    if (ArgSize == 0) {
      // We cannot allocate a zero-byte object for the first variadic argument,
      // so just make up a size.
      ArgSize = 4;
    }
    // This will point to the next argument passed via stack.
    return MFI->CreateFixedObject(ArgSize, ArgOffset, !ForceMutable);
  }
}

// Set up the stack frame that the va_list pointer will start from.
void
ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                        SDLoc dl, SDValue &Chain,
                                        unsigned ArgOffset,
                                        unsigned TotalArgRegsSaveSize,
                                        bool ForceMutable) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Try to store any remaining integer argument regs to their spots on the
  // stack so that they may be loaded by dereferencing the result of va_next.
  // If there are no regs to be stored, just point the address after the last
  // argument passed via stack.
  int FrameIndex =
      StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
                     CCInfo.getInRegsParamsCount(), 0, ArgOffset, 0,
                     ForceMutable, 0, TotalArgRegsSaveSize);

  AFI->setVarArgsFrameIndex(FrameIndex);
}

SDValue
ARMTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        SDLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue);
  CCInfo.AnalyzeFormalArguments(Ins,
                                CCAssignFnForNode(CallConv, /* Return*/ false,
                                                  isVarArg));

  SmallVector<SDValue, 16> ArgValues;
  int lastInsIndex = -1;
  SDValue ArgValue;
  Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;

  // Initially ArgRegsSaveSize is zero.
  // Then we increase this value each time we meet a byval parameter.
  // We also increase this value in the case of a varargs function.
  AFI->setArgRegsSaveSize(0);

  unsigned ByValStoreOffset = 0;
  unsigned TotalArgRegsSaveSize = 0;
  unsigned ArgRegsSaveSizeMaxAlign = 4;

  // Calculate the amount of stack space that we need to allocate to store
  // byval and variadic arguments that are passed in registers.
  // We need to know this before we allocate the first byval or variadic
  // argument, as they will be allocated a stack slot below the CFA (Canonical
  // Frame Address, the stack pointer at entry to the function).
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (VA.isMemLoc()) {
      int index = VA.getValNo();
      if (index != lastInsIndex) {
        ISD::ArgFlagsTy Flags = Ins[index].Flags;
        if (Flags.isByVal()) {
          unsigned ExtraArgRegsSize;
          unsigned ExtraArgRegsSaveSize;
          computeRegArea(CCInfo, MF, CCInfo.getInRegsParamsProceed(),
                         Flags.getByValSize(),
                         ExtraArgRegsSize, ExtraArgRegsSaveSize);

          TotalArgRegsSaveSize += ExtraArgRegsSaveSize;
          if (Flags.getByValAlign() > ArgRegsSaveSizeMaxAlign)
            ArgRegsSaveSizeMaxAlign = Flags.getByValAlign();
          CCInfo.nextInRegsParam();
        }
        lastInsIndex = index;
      }
    }
  }
  CCInfo.rewindByValRegsInfo();
  lastInsIndex = -1;
  if (isVarArg) {
    unsigned ExtraArgRegsSize;
    unsigned ExtraArgRegsSaveSize;
    computeRegArea(CCInfo, MF, CCInfo.getInRegsParamsCount(), 0,
                   ExtraArgRegsSize, ExtraArgRegsSaveSize);
    TotalArgRegsSaveSize += ExtraArgRegsSaveSize;
  }
  // If the arg regs save area contains N-byte aligned values, the
  // bottom of it must be at least N-byte aligned.
  TotalArgRegsSaveSize =
      RoundUpToAlignment(TotalArgRegsSaveSize, ArgRegsSaveSizeMaxAlign);
  TotalArgRegsSaveSize = std::min(TotalArgRegsSaveSize, 16U);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx);
    CurArgIdx = Ins[VA.getValNo()].OrigArgIndex;
    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();

      if (VA.needsCustom()) {
        // f64 and vector types are split up into multiple registers or
        // combinations of registers and stack slots.
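        // For instance (an assumed example): with a soft-float calling
        // convention an f64 may arrive as an {R0,R1} pair (recombined via
        // VMOVDRR in GetF64FormalArgument), or as one register plus one
        // stack word once the GPRs run out; v2f64 goes through that process
        // twice.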
        if (VA.getLocVT() == MVT::v2f64) {
          SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
                                                   Chain, DAG, dl);
          VA = ArgLocs[++i]; // skip ahead to next loc
          SDValue ArgValue2;
          if (VA.isMemLoc()) {
            int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true);
            SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
            ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
                                    MachinePointerInfo::getFixedStack(FI),
                                    false, false, false, 0);
          } else {
            ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
                                             Chain, DAG, dl);
          }
          ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue1, DAG.getIntPtrConstant(0));
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue2, DAG.getIntPtrConstant(1));
        } else
          ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);

      } else {
        const TargetRegisterClass *RC;

        if (RegVT == MVT::f32)
          RC = &ARM::SPRRegClass;
        else if (RegVT == MVT::f64)
          RC = &ARM::DPRRegClass;
        else if (RegVT == MVT::v2f64)
          RC = &ARM::QPRRegClass;
        else if (RegVT == MVT::i32)
          RC = AFI->isThumb1OnlyFunction() ?
            (const TargetRegisterClass*)&ARM::tGPRRegClass :
            (const TargetRegisterClass*)&ARM::GPRRegClass;
        else
          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");

        // Transform the arguments in physical registers into virtual ones.
        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
      }

      // If this is an 8 or 16-bit value, it is really passed promoted
      // to 32 bits. Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      InVals.push_back(ArgValue);

    } else { // VA.isRegLoc()

      // sanity check
      assert(VA.isMemLoc());
      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");

      int index = ArgLocs[i].getValNo();

      // Some Ins[] entries become multiple ArgLoc[] entries.
      // Process them only once.
      if (index != lastInsIndex)
      {
        ISD::ArgFlagsTy Flags = Ins[index].Flags;
        // FIXME: For now, all byval parameter objects are marked mutable.
        // This can be changed with more analysis.
        // In the case of tail call optimization, mark all arguments mutable,
        // since they could be overwritten by the lowering of arguments in
        // case of a tail call.
        if (Flags.isByVal()) {
          unsigned CurByValIndex = CCInfo.getInRegsParamsProceed();

          ByValStoreOffset =
              RoundUpToAlignment(ByValStoreOffset, Flags.getByValAlign());
          int FrameIndex = StoreByValRegs(
              CCInfo, DAG, dl, Chain, CurOrigArg,
              CurByValIndex,
              Ins[VA.getValNo()].PartOffset,
              VA.getLocMemOffset(),
              Flags.getByValSize(),
              true /*force mutable frames*/,
              ByValStoreOffset,
              TotalArgRegsSaveSize);
          ByValStoreOffset += Flags.getByValSize();
          ByValStoreOffset = std::min(ByValStoreOffset, 16U);
          InVals.push_back(DAG.getFrameIndex(FrameIndex, getPointerTy()));
          CCInfo.nextInRegsParam();
        } else {
          unsigned FIOffset = VA.getLocMemOffset();
          int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
                                          FIOffset, true);

          // Create load nodes to retrieve arguments from the stack.
          SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
          InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                       MachinePointerInfo::getFixedStack(FI),
                                       false, false, false, 0));
        }
        lastInsIndex = index;
      }
    }
  }

  // varargs
  if (isVarArg)
    VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
                         CCInfo.getNextStackOffset(),
                         TotalArgRegsSaveSize);

  AFI->setArgumentStackSize(CCInfo.getNextStackOffset());

  return Chain;
}

/// isFloatingPointZero - Return true if this is +0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isPosZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
      SDValue WrapperOp = Op.getOperand(1).getOperand(0);
      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
        if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
          return CFP->getValueAPF().isPosZero();
    }
  }
  return false;
}

/// Returns appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.
SDValue
ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                             SDValue &ARMcc, SelectionDAG &DAG,
                             SDLoc dl) const {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    unsigned C = RHSC->getZExtValue();
    if (!isLegalICmpImmediate(C)) {
      // Constant does not fit, try adjusting it by one?
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C-1, MVT::i32);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        if (C != 0 && isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C-1, MVT::i32);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C+1, MVT::i32);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETULE) ?
            ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C+1, MVT::i32);
        }
        break;
      }
    }
  }

  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
  ARMISD::NodeType CompareType;
  switch (CondCode) {
  default:
    CompareType = ARMISD::CMP;
    break;
  case ARMCC::EQ:
  case ARMCC::NE:
    // Uses only Z Flag
    CompareType = ARMISD::CMPZ;
    break;
  }
  ARMcc = DAG.getConstant(CondCode, MVT::i32);
  return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
}

/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
SDValue
ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                             SDLoc dl) const {
  SDValue Cmp;
  if (!isFloatingPointZero(RHS))
    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
  else
    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
}

/// duplicateCmp - Glue values can have only one use, so this function
/// duplicates a comparison node.
SDValue
ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
  unsigned Opc = Cmp.getOpcode();
  SDLoc DL(Cmp);
  if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
    return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),
                       Cmp.getOperand(1));

  assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
  Cmp = Cmp.getOperand(0);
  Opc = Cmp.getOpcode();
  if (Opc == ARMISD::CMPFP)
    Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),
                      Cmp.getOperand(1));
  else {
    assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
    Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
  }
  return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
}

std::pair<SDValue, SDValue>
ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                 SDValue &ARMcc) const {
  assert(Op.getValueType() == MVT::i32 && "Unsupported value type");

  SDValue Value, OverflowCmp;
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);


  // FIXME: We are currently always generating CMPs because we don't support
  // generating CMN through the backend. This is not as good as the natural
  // CMP case because it causes a register dependency and cannot be folded
  // later.

  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Unknown overflow instruction!");
  case ISD::SADDO:
    ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
    Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
    break;
  case ISD::UADDO:
    ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
    Value = DAG.getNode(ISD::ADD, SDLoc(Op), Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, Value, LHS);
    break;
  case ISD::SSUBO:
    ARMcc = DAG.getConstant(ARMCC::VC, MVT::i32);
    Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
    break;
  case ISD::USUBO:
    ARMcc = DAG.getConstant(ARMCC::HS, MVT::i32);
    Value = DAG.getNode(ISD::SUB, SDLoc(Op), Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, SDLoc(Op), MVT::Glue, LHS, RHS);
    break;
  } // switch (...)
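  // For the ADD cases the compare recomputes RHS from the result: CMP Value,
  // LHS evaluates (LHS + RHS) - LHS, so its V flag is set exactly when the
  // signed addition overflowed and its C flag is clear exactly when the
  // unsigned addition wrapped. For the SUB cases CMP LHS, RHS is the
  // subtraction itself, so its flags are used directly.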

  return std::make_pair(Value, OverflowCmp);
}


SDValue
ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
    return SDValue();

  SDValue Value, OverflowCmp;
  SDValue ARMcc;
  std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  // We use 0 and 1 as false and true values.
  SDValue TVal = DAG.getConstant(1, MVT::i32);
  SDValue FVal = DAG.getConstant(0, MVT::i32);
  EVT VT = Op.getValueType();

  SDValue Overflow = DAG.getNode(ARMISD::CMOV, SDLoc(Op), VT, TVal, FVal,
                                 ARMcc, CCR, OverflowCmp);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
  return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), VTs, Value, Overflow);
}


SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Cond = Op.getOperand(0);
  SDValue SelectTrue = Op.getOperand(1);
  SDValue SelectFalse = Op.getOperand(2);
  SDLoc dl(Op);
  unsigned Opc = Cond.getOpcode();

  if (Cond.getResNo() == 1 &&
      (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
       Opc == ISD::USUBO)) {
    if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
      return SDValue();

    SDValue Value, OverflowCmp;
    SDValue ARMcc;
    std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    EVT VT = Op.getValueType();

    return DAG.getNode(ARMISD::CMOV, SDLoc(Op), VT, SelectTrue, SelectFalse,
                       ARMcc, CCR, OverflowCmp);
  }

  // Convert:
  //
  //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
  //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
  //
  if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
    const ConstantSDNode *CMOVTrue =
        dyn_cast<ConstantSDNode>(Cond.getOperand(0));
    const ConstantSDNode *CMOVFalse =
        dyn_cast<ConstantSDNode>(Cond.getOperand(1));

    if (CMOVTrue && CMOVFalse) {
      unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
      unsigned CMOVFalseVal = CMOVFalse->getZExtValue();

      SDValue True;
      SDValue False;
      if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
        True = SelectTrue;
        False = SelectFalse;
      } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
        True = SelectFalse;
        False = SelectTrue;
      }

      if (True.getNode() && False.getNode()) {
        EVT VT = Op.getValueType();
        SDValue ARMcc = Cond.getOperand(2);
        SDValue CCR = Cond.getOperand(3);
        SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
        assert(True.getValueType() == VT);
        return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp);
      }
    }
  }

  // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
  // undefined bits before doing a full-word comparison with zero.
  Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
                     DAG.getConstant(1, Cond.getValueType()));

  return DAG.getSelectCC(dl, Cond,
                         DAG.getConstant(0, Cond.getValueType()),
                         SelectTrue, SelectFalse, ISD::SETNE);
}

static ISD::CondCode getInverseCCForVSEL(ISD::CondCode CC) {
  if (CC == ISD::SETNE)
    return ISD::SETEQ;
  return ISD::getSetCCInverse(CC, true);
}

static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                                 bool &swpCmpOps, bool &swpVselOps) {
  // Start by selecting the GE condition code for opcodes that return true for
  // 'equality'
  if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
      CC == ISD::SETULE)
    CondCode = ARMCC::GE;

  // and GT for opcodes that return false for 'equality'.
  else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
           CC == ISD::SETULT)
    CondCode = ARMCC::GT;

  // Since we are constrained to GE/GT, if the opcode contains 'less', we need
  // to swap the compare operands.
  if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
      CC == ISD::SETULT)
    swpCmpOps = true;

  // Both GT and GE are ordered comparisons, and return false for 'unordered'.
  // If we have an unordered opcode, we need to swap the operands to the VSEL
  // instruction (effectively negating the condition).
  //
  // This also has the effect of swapping which one of 'less' or 'greater'
  // returns true, so we also swap the compare operands. It also switches
  // whether we return true for 'equality', so we compensate by picking the
  // opposite condition code to our original choice.
  if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
      CC == ISD::SETUGT) {
    swpCmpOps = !swpCmpOps;
    swpVselOps = !swpVselOps;
    CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
  }

  // 'ordered' is 'anything but unordered', so use the VS condition code and
  // swap the VSEL operands.
  if (CC == ISD::SETO) {
    CondCode = ARMCC::VS;
    swpVselOps = true;
  }

  // 'unordered or not equal' is 'anything but equal', so use the EQ condition
  // code and swap the VSEL operands.
  if (CC == ISD::SETUNE) {
    CondCode = ARMCC::EQ;
    swpVselOps = true;
  }
}

SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue TrueVal = Op.getOperand(2);
  SDValue FalseVal = Op.getOperand(3);
  SDLoc dl(Op);

  if (LHS.getValueType() == MVT::i32) {
    // Try to generate VSEL on ARMv8.
    // The VSEL instruction can't use all the usual ARM condition
    // codes: it only has two bits to select the condition code, so it's
    // constrained to use only GE, GT, VS and EQ.
    //
    // To implement all the various ISD::SETXXX opcodes, we sometimes need to
    // swap the operands of the previous compare instruction (effectively
    // inverting the compare condition, swapping 'less' and 'greater') and
    // sometimes need to swap the operands to the VSEL (which inverts the
    // condition in the sense of firing whenever the previous condition didn't)
    if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
                                         TrueVal.getValueType() == MVT::f64)) {
      ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
      if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
          CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
        CC = getInverseCCForVSEL(CC);
        std::swap(TrueVal, FalseVal);
      }
    }

    SDValue ARMcc;
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
                       Cmp);
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  // Try to generate VSEL on ARMv8.
  if (getSubtarget()->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
                                       TrueVal.getValueType() == MVT::f64)) {
    // We can select VMAXNM/VMINNM from a compare followed by a select with the
    // same operands, as follows:
    //   c = fcmp [ogt, olt, ugt, ult] a, b
    //   select c, a, b
    // We only do this in unsafe-fp-math, because signed zeros and NaNs are
    // handled differently than the original code sequence.
    if (getTargetMachine().Options.UnsafeFPMath && LHS == TrueVal &&
        RHS == FalseVal) {
      if (CC == ISD::SETOGT || CC == ISD::SETUGT)
        return DAG.getNode(ARMISD::VMAXNM, dl, VT, TrueVal, FalseVal);
      if (CC == ISD::SETOLT || CC == ISD::SETULT)
        return DAG.getNode(ARMISD::VMINNM, dl, VT, TrueVal, FalseVal);
    }

    bool swpCmpOps = false;
    bool swpVselOps = false;
    checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);

    if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
        CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
      if (swpCmpOps)
        std::swap(LHS, RHS);
      if (swpVselOps)
        std::swap(TrueVal, FalseVal);
    }
  }

  SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
                               ARMcc, CCR, Cmp);
  if (CondCode2 != ARMCC::AL) {
    SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32);
    // FIXME: Needs another CMP because flag can have but one use.
    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
    Result = DAG.getNode(ARMISD::CMOV, dl, VT,
                         Result, TrueVal, ARMcc2, CCR, Cmp2);
  }
  return Result;
}

/// canChangeToInt - Given the fp compare operand, return true if it is suitable
/// to morph to an integer compare sequence.
static bool canChangeToInt(SDValue Op, bool &SeenZero,
                           const ARMSubtarget *Subtarget) {
  SDNode *N = Op.getNode();
  if (!N->hasOneUse())
    // Otherwise it requires moving the value from fp to integer registers.
    return false;
  if (!N->getNumValues())
    return false;
  EVT VT = Op.getValueType();
  if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
    // f32 case is generally profitable. f64 case only makes sense when vcmpe +
    // vmrs are very slow, e.g. cortex-a8.
    return false;

  if (isFloatingPointZero(Op)) {
    SeenZero = true;
    return true;
  }
  return ISD::isNormalLoad(N);
}

static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
  if (isFloatingPointZero(Op))
    return DAG.getConstant(0, MVT::i32);

  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
    return DAG.getLoad(MVT::i32, SDLoc(Op),
                       Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
                       Ld->isVolatile(), Ld->isNonTemporal(),
                       Ld->isInvariant(), Ld->getAlignment());

  llvm_unreachable("Unknown VFP cmp argument!");
}

static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
                           SDValue &RetVal1, SDValue &RetVal2) {
  if (isFloatingPointZero(Op)) {
    RetVal1 = DAG.getConstant(0, MVT::i32);
    RetVal2 = DAG.getConstant(0, MVT::i32);
    return;
  }

  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
    SDValue Ptr = Ld->getBasePtr();
    RetVal1 = DAG.getLoad(MVT::i32, SDLoc(Op),
                          Ld->getChain(), Ptr,
                          Ld->getPointerInfo(),
                          Ld->isVolatile(), Ld->isNonTemporal(),
                          Ld->isInvariant(), Ld->getAlignment());

    EVT PtrType = Ptr.getValueType();
    unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
    SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(Op),
                                 PtrType, Ptr, DAG.getConstant(4, PtrType));
    RetVal2 = DAG.getLoad(MVT::i32, SDLoc(Op),
                          Ld->getChain(), NewPtr,
                          Ld->getPointerInfo().getWithOffset(4),
                          Ld->isVolatile(), Ld->isNonTemporal(),
                          Ld->isInvariant(), NewAlign);
    return;
  }

  llvm_unreachable("Unknown VFP cmp argument!");
}

/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
/// f32 and even f64 comparisons to integer ones.
SDValue
ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  bool LHSSeenZero = false;
  bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
  bool RHSSeenZero = false;
  bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
  if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
    // If unsafe fp math optimization is enabled and there are no other uses of
    // the CMP operands, and the condition code is EQ or NE, we can optimize it
    // to an integer comparison.
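    // With one operand known to be +/-0.0, x == 0.0 exactly when
    // (bits(x) & 0x7fffffff) == 0, so after masking out the sign bits an
    // integer compare can stand in for the FP one (this also makes -0.0
    // compare equal to +0.0, as EQ/NE require).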
    if (CC == ISD::SETOEQ)
      CC = ISD::SETEQ;
    else if (CC == ISD::SETUNE)
      CC = ISD::SETNE;

    SDValue Mask = DAG.getConstant(0x7fffffff, MVT::i32);
    SDValue ARMcc;
    if (LHS.getValueType() == MVT::f32) {
      LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
                        bitcastf32Toi32(LHS, DAG), Mask);
      RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
                        bitcastf32Toi32(RHS, DAG), Mask);
      SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
      SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
      return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                         Chain, Dest, ARMcc, CCR, Cmp);
    }

    SDValue LHS1, LHS2;
    SDValue RHS1, RHS2;
    expandf64Toi32(LHS, DAG, LHS1, LHS2);
    expandf64Toi32(RHS, DAG, RHS1, RHS2);
    LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
    RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
    ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
    ARMcc = DAG.getConstant(CondCode, MVT::i32);
    SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
    return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
  }

  return SDValue();
}

SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  if (LHS.getValueType() == MVT::i32) {
    SDValue ARMcc;
    SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
    SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
    return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                       Chain, Dest, ARMcc, CCR, Cmp);
  }

  assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);

  if (getTargetMachine().Options.UnsafeFPMath &&
      (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
       CC == ISD::SETNE || CC == ISD::SETUNE)) {
    SDValue Result = OptimizeVFPBrcond(Op, DAG);
    if (Result.getNode())
      return Result;
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
  SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  if (CondCode2 != ARMCC::AL) {
    ARMcc = DAG.getConstant(CondCode2, MVT::i32);
    SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
    Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
  }
  return Res;
}

SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);

  EVT PTy = getPointerTy();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
  SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy);
  SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
  Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId);
  Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, PTy));
  SDValue Addr =
      DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
  if (Subtarget->isThumb2()) {
    // Thumb2 uses a two-level jump. That is, it jumps into the jump table
    // which does another jump to the destination. This also makes it easier
    // to translate it to TBB / TBH later.
    // FIXME: This might not work if the function is extremely large.
    return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
                       Addr, Op.getOperand(2), JTI, UId);
  }
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
    Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
                       MachinePointerInfo::getJumpTable(),
                       false, false, false, 0);
    Chain = Addr.getValue(1);
    Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
  } else {
    Addr = DAG.getLoad(PTy, dl, Chain, Addr,
                       MachinePointerInfo::getJumpTable(),
                       false, false, false, 0);
    Chain = Addr.getValue(1);
    return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId);
  }
}

static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  if (Op.getValueType().getVectorElementType() == MVT::i32) {
    if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
         "Invalid type for custom lowering!");
  if (VT != MVT::v4i16)
    return DAG.UnrollVectorOp(Op.getNode());

  Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0));
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
}

static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return LowerVectorFP_TO_INT(Op, DAG);

  SDLoc dl(Op);
  unsigned Opc;

  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid opcode!");
  case ISD::FP_TO_SINT:
    Opc = ARMISD::FTOSI;
    break;
  case ISD::FP_TO_UINT:
    Opc = ARMISD::FTOUI;
    break;
  }
  Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
  return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
}

static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);

  if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
    if (VT.getVectorElementType() == MVT::f32)
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
         "Invalid type for custom lowering!");
  if (VT != MVT::v4f32)
    return DAG.UnrollVectorOp(Op.getNode());

  unsigned CastOpc;
  unsigned Opc;
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid opcode!");
  case ISD::SINT_TO_FP:
    CastOpc = ISD::SIGN_EXTEND;
    Opc = ISD::SINT_TO_FP;
    break;
  case ISD::UINT_TO_FP:
    CastOpc = ISD::ZERO_EXTEND;
    Opc = ISD::UINT_TO_FP;
    break;
  }

  Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
  return DAG.getNode(Opc, dl, VT, Op);
}

static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  if (VT.isVector())
    return LowerVectorINT_TO_FP(Op, DAG);

  SDLoc dl(Op);
  unsigned Opc;

  switch (Op.getOpcode()) {
  default:
llvm_unreachable("Invalid opcode!"); 3791 case ISD::SINT_TO_FP: 3792 Opc = ARMISD::SITOF; 3793 break; 3794 case ISD::UINT_TO_FP: 3795 Opc = ARMISD::UITOF; 3796 break; 3797 } 3798 3799 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 3800 return DAG.getNode(Opc, dl, VT, Op); 3801 } 3802 3803 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 3804 // Implement fcopysign with a fabs and a conditional fneg. 3805 SDValue Tmp0 = Op.getOperand(0); 3806 SDValue Tmp1 = Op.getOperand(1); 3807 SDLoc dl(Op); 3808 EVT VT = Op.getValueType(); 3809 EVT SrcVT = Tmp1.getValueType(); 3810 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 3811 Tmp0.getOpcode() == ARMISD::VMOVDRR; 3812 bool UseNEON = !InGPR && Subtarget->hasNEON(); 3813 3814 if (UseNEON) { 3815 // Use VBSL to copy the sign bit. 3816 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 3817 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 3818 DAG.getTargetConstant(EncodedVal, MVT::i32)); 3819 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 3820 if (VT == MVT::f64) 3821 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3822 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 3823 DAG.getConstant(32, MVT::i32)); 3824 else /*if (VT == MVT::f32)*/ 3825 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 3826 if (SrcVT == MVT::f32) { 3827 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 3828 if (VT == MVT::f64) 3829 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3830 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 3831 DAG.getConstant(32, MVT::i32)); 3832 } else if (VT == MVT::f32) 3833 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 3834 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 3835 DAG.getConstant(32, MVT::i32)); 3836 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 3837 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 3838 3839 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 3840 MVT::i32); 3841 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 3842 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 3843 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 3844 3845 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 3846 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 3847 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 3848 if (VT == MVT::f32) { 3849 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 3850 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 3851 DAG.getConstant(0, MVT::i32)); 3852 } else { 3853 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 3854 } 3855 3856 return Res; 3857 } 3858 3859 // Bitcast operand 1 to i32. 3860 if (SrcVT == MVT::f64) 3861 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3862 Tmp1).getValue(1); 3863 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 3864 3865 // Or in the signbit with integer operations. 3866 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 3867 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 3868 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 3869 if (VT == MVT::f32) { 3870 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 3871 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 3872 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3873 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 3874 } 3875 3876 // f64: Or the high part with signbit and then combine two parts. 
  Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
                     Tmp0);
  SDValue Lo = Tmp0.getValue(0);
  SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
  Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
}

SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}

SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const ARMBaseRegisterInfo &ARI =
      *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned FrameReg = ARI.getFrameRegister(MF);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned ARMTargetLowering::getRegisterByName(const char* RegName,
                                              EVT VT) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
                     .Case("sp", ARM::SP)
                     .Default(0);
  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}

/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that.
static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  SDValue Op = N->getOperand(0);

  // This function is only supposed to be called for i64 types, either as the
  // source or destination of the bit convert.
  EVT SrcVT = Op.getValueType();
  EVT DstVT = N->getValueType(0);
  assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
         "ExpandBITCAST called for non-i64 type");

  // Turn i64->f64 into VMOVDRR.
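  // (VMOVDRR moves the two i32 halves directly into a double-precision VFP
  // register, so the conversion needs no memory round-trip.)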
  if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(0, MVT::i32));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(1, MVT::i32));
    return DAG.getNode(ISD::BITCAST, dl, DstVT,
                       DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
  }

  // Turn f64->i64 into VMOVRRD.
  if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
    SDValue Cvt;
    if (TLI.isBigEndian() && SrcVT.isVector() &&
        SrcVT.getVectorNumElements() > 1)
      Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                        DAG.getVTList(MVT::i32, MVT::i32),
                        DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
    else
      Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
                        DAG.getVTList(MVT::i32, MVT::i32), Op);
    // Merge the pieces into a single i64 value.
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
  }

  return SDValue();
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
/// Zero vectors are used to represent vector negation and in those cases
/// will be implemented with the NEON VNEG instruction. However, VNEG does
/// not support i64 elements, so sometimes the zero vectors will need to be
/// explicitly constructed. Regardless, use a canonical VMOV to create the
/// zero vector.
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl) {
  assert(VT.isVector() && "Expected a vector type");
  // The canonical modified immediate encoding of a zero vector is....0!
  SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
  EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
  SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
  return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}

/// LowerShiftRightParts - Lower SRA_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  SDValue ARMcc;
  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ?
      ISD::SRA : ISD::SRL;

  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);

  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);

  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
                          ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc,
                           CCR, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  SDValue ARMcc;

  assert(Op.getOpcode() == ISD::SHL_PARTS);
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, MVT::i32));
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
  SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);

  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
                          ARMcc, DAG, dl);
  SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc,
                           CCR, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  // The rounding mode is in bits 23:22 of the FPSCR.
  // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
  // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
  // so that the shift + and get folded into a bitfield extract.
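  // Worked example: round-towards-zero is FPSCR bits 23:22 == 0b11, which
  // FLT_ROUNDS encodes as 0. Adding 1 << 22 increments that two-bit field
  // modulo 4 (once shifted and masked): ((3 + 1) & 3) == 0, as required.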
  SDLoc dl(Op);
  SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
                              DAG.getConstant(Intrinsic::arm_get_fpscr,
                                              MVT::i32));
  SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
                                  DAG.getConstant(1U << 22, MVT::i32));
  SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
                              DAG.getConstant(22, MVT::i32));
  return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
                     DAG.getConstant(3, MVT::i32));
}

static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  if (!ST->hasV6T2Ops())
    return SDValue();

  SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0));
  return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
}

/// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count
/// for each 16-bit element from operand, repeated. The basic idea is to
/// leverage vcnt to get the 8-bit counts, gather and add the results.
///
/// Trace for v4i16:
/// input    = [v0    v1    v2    v3   ] (vi 16-bit element)
/// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element)
/// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi)
/// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6]
///            [b0 b1 b2 b3 b4 b5 b6 b7]
///           +[b1 b0 b3 b2 b5 b4 b7 b6]
/// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0,
/// vuzp:    = [k0 k1 k2 k3 k0 k1 k2 k3]  each ki is 8-bits)
static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
  SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0));
  SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0);
  SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1);
  SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2);
  return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3);
}

/// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the
/// bit-count for each 16-bit element from the operand. We need slightly
/// different sequencing for v4i16 and v8i16 to stay within NEON's available
/// 64/128-bit registers.
///
/// Trace for v4i16:
/// input           = [v0    v1    v2    v3    ] (vi 16-bit element)
/// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi)
/// v8i16:Extended  = [k0 k1 k2 k3 k0 k1 k2 k3 ]
/// v4i16:Extracted = [k0    k1    k2    k3    ]
static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  SDValue BitCounts = getCTPOP16BitCounts(N, DAG);
  if (VT.is64BitVector()) {
    SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended,
                       DAG.getIntPtrConstant(0));
  } else {
    SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8,
                                    BitCounts, DAG.getIntPtrConstant(0));
    return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted);
  }
}

/// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the
/// bit-count for each 32-bit element from the operand. The idea here is
/// to split the vector into 16-bit elements, leverage the 16-bit count
/// routine, and then combine the results.
///
/// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged):
/// input     = [v0    v1   ] (vi: 32-bit elements)
/// Bitcast   = [w0 w1 w2 w3] (wi: 16-bit elements, v0 = [w0 w1])
/// Counts16  = [k0 k1 k2 k3] (ki: 16-bit elements, bit-count of wi)
/// vrev: N0  = [k1 k0 k3 k2]
///             [k0 k1 k2 k3]
///        N1 =+[k1 k0 k3 k2]
///             [k0 k2 k1 k3]
///        N2 =+[k1 k3 k0 k2]
///             [k0 k2 k1 k3]
///  Extended =+[k1 k3 k0 k2]
///             [k0    k2   ]
/// Extracted =+[k1    k3   ]
///
static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;

  SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0));
  SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG);
  SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16);
  SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0);
  SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1);

  if (VT.is64BitVector()) {
    SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended,
                       DAG.getIntPtrConstant(0));
  } else {
    SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2,
                                    DAG.getIntPtrConstant(0));
    return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted);
  }
}

static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);

  assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
  assert((VT == MVT::v2i32 || VT == MVT::v4i32 ||
          VT == MVT::v4i16 || VT == MVT::v8i16) &&
         "Unexpected type for custom ctpop lowering");

  if (VT.getVectorElementType() == MVT::i32)
    return lowerCTPOP32BitElements(N, DAG);
  else
    return lowerCTPOP16BitElements(N, DAG);
}

static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  if (!VT.isVector())
    return SDValue();

  // Lower vector shifts on NEON to use VSHL.
  assert(ST->hasNEON() && "unexpected vector shift");

  // Left shifts translate directly to the vshiftu intrinsic.
  if (N->getOpcode() == ISD::SHL)
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                       DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32),
                       N->getOperand(0), N->getOperand(1));

  assert((N->getOpcode() == ISD::SRA ||
          N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");

  // NEON uses the same intrinsics for both left and right shifts. For
  // right shifts, the shift amounts are negative, so negate the vector of
  // shift amounts.
  EVT ShiftVT = N->getOperand(1).getValueType();
  SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
                                     getZeroVector(ShiftVT, DAG, dl),
                                     N->getOperand(1));
  Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
                                 Intrinsic::arm_neon_vshifts :
                                 Intrinsic::arm_neon_vshiftu);
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
                     DAG.getConstant(vshiftInt, MVT::i32),
                     N->getOperand(0), NegatedCount);
}

static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
                                const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // We can get here for a node like i32 = ISD::SHL i32, i64
  if (VT != MVT::i64)
    return SDValue();

  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
         "Unknown shift to lower!");

  // We only lower SRA, SRL of 1 here, all others use generic lowering.
  if (!isa<ConstantSDNode>(N->getOperand(1)) ||
      cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
    return SDValue();

  // If we are in thumb mode, we don't have RRX.
  if (ST->isThumb1Only()) return SDValue();

  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                           N->getOperand(0), DAG.getConstant(0, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                           N->getOperand(0), DAG.getConstant(1, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the result into a carry flag.
  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
  SDValue TmpOp0, TmpOp1;
  bool Invert = false;
  bool Swap = false;
  unsigned Opc = 0;

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  EVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  SDLoc dl(Op);

  if (Op.getOperand(1).getValueType().isFloatingPoint()) {
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal FP comparison");
    case ISD::SETUNE:
    case ISD::SETNE:  Invert = true; // Fallthrough
    case ISD::SETOEQ:
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETOLT:
    case ISD::SETLT:  Swap = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETOLE:
    case ISD::SETLE:  Swap = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
    case ISD::SETUGE: Swap = true; // Fallthrough
    case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
    case ISD::SETUGT: Swap = true; // Fallthrough
    case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
    case ISD::SETUEQ: Invert = true; // Fallthrough
    case ISD::SETONE:
      // Expand this to (OLT | OGT).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
      break;
    case ISD::SETUO: Invert = true; // Fallthrough
    case ISD::SETO:
      // Expand this to (OLT | OGE).
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
  SDValue TmpOp0, TmpOp1;
  bool Invert = false;
  bool Swap = false;
  unsigned Opc = 0;

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  EVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  SDLoc dl(Op);

  if (Op.getOperand(1).getValueType().isFloatingPoint()) {
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal FP comparison");
    case ISD::SETUNE:
    case ISD::SETNE:  Invert = true; // Fallthrough
    case ISD::SETOEQ:
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETOLT:
    case ISD::SETLT:  Swap = true; // Fallthrough
    case ISD::SETOGT:
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETOLE:
    case ISD::SETLE:  Swap = true; // Fallthrough
    case ISD::SETOGE:
    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
    case ISD::SETUGE: Swap = true; // Fallthrough
    case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
    case ISD::SETUGT: Swap = true; // Fallthrough
    case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
    case ISD::SETUEQ: Invert = true; // Fallthrough
    case ISD::SETONE:
      // Expand this to (OLT | OGT).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
      break;
    case ISD::SETUO: Invert = true; // Fallthrough
    case ISD::SETO:
      // Expand this to (OLT | OGE).
      TmpOp0 = Op0;
      TmpOp1 = Op1;
      Opc = ISD::OR;
      Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
      Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
      break;
    }
  } else {
    // Integer comparisons.
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal integer comparison");
    case ISD::SETNE:  Invert = true; // Fallthrough
    case ISD::SETEQ:  Opc = ARMISD::VCEQ; break;
    case ISD::SETLT:  Swap = true; // Fallthrough
    case ISD::SETGT:  Opc = ARMISD::VCGT; break;
    case ISD::SETLE:  Swap = true; // Fallthrough
    case ISD::SETGE:  Opc = ARMISD::VCGE; break;
    case ISD::SETULT: Swap = true; // Fallthrough
    case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
    case ISD::SETULE: Swap = true; // Fallthrough
    case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
    }

    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
    if (Opc == ARMISD::VCEQ) {

      SDValue AndOp;
      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
        AndOp = Op0;
      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
        AndOp = Op1;

      // Ignore bitconvert.
      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
        AndOp = AndOp.getOperand(0);

      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
        Opc = ARMISD::VTST;
        Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
        Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
        Invert = !Invert;
      }
    }
  }

  if (Swap)
    std::swap(Op0, Op1);

  // If one of the operands is a constant vector zero, attempt to fold the
  // comparison to a specialized compare-against-zero form.
  SDValue SingleOp;
  if (ISD::isBuildVectorAllZeros(Op1.getNode()))
    SingleOp = Op0;
  else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
    if (Opc == ARMISD::VCGE)
      Opc = ARMISD::VCLEZ;
    else if (Opc == ARMISD::VCGT)
      Opc = ARMISD::VCLTZ;
    SingleOp = Op1;
  }

  SDValue Result;
  if (SingleOp.getNode()) {
    switch (Opc) {
    case ARMISD::VCEQ:
      Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break;
    case ARMISD::VCGE:
      Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break;
    case ARMISD::VCLEZ:
      Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break;
    case ARMISD::VCGT:
      Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break;
    case ARMISD::VCLTZ:
      Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break;
    default:
      Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
    }
  } else {
    Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
  }

  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}
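// Worked example: NEON has no single "ordered not-equal" compare, so for
//   setcc <4 x float> %a, %b, setone
// the switch above rewrites the operands before the common emission path:
//   Op0 = VCGT %b, %a      ; a < b  (ordered)
//   Op1 = VCGT %a, %b      ; a > b  (ordered)
//   Result = OR Op0, Op1   ; a <> b, false on NaN lanes
// The unordered forms (SETUEQ, SETUO, ...) reuse the same expansions with
// Invert set, relying on the final getNOT to flip every lane.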
/// isNEONModifiedImm - Check if the specified splat value corresponds to a
/// valid vector constant for a NEON instruction with a "modified immediate"
/// operand (e.g., VMOV).  If so, return the encoded value.
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
                                 unsigned SplatBitSize, SelectionDAG &DAG,
                                 EVT &VT, bool is128Bits, NEONModImmType type) {
  unsigned OpCmode, Imm;

  // SplatBitSize is set to the smallest size that splats the vector, so a
  // zero vector will always have SplatBitSize == 8.  However, NEON modified
  // immediate instructions other than VMOV do not support the 8-bit encoding
  // of a zero vector, and the default encoding of zero is supposed to be the
  // 32-bit version.
  if (SplatBits == 0)
    SplatBitSize = 32;

  switch (SplatBitSize) {
  case 8:
    if (type != VMOVModImm)
      return SDValue();
    // Any 1-byte value is OK.  Op=0, Cmode=1110.
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
    OpCmode = 0xe;
    Imm = SplatBits;
    VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
    break;

  case 16:
    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
    VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x00nn: Op=x, Cmode=100x.
      OpCmode = 0x8;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0xnn00: Op=x, Cmode=101x.
      OpCmode = 0xa;
      Imm = SplatBits >> 8;
      break;
    }
    return SDValue();

  case 32:
    // NEON's 32-bit VMOV supports splat values where:
    // * only one byte is nonzero, or
    // * the least significant byte is 0xff and the second byte is nonzero, or
    // * the least significant 2 bytes are 0xff and the third is nonzero.
    VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x000000nn: Op=x, Cmode=000x.
      OpCmode = 0;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0x0000nn00: Op=x, Cmode=001x.
      OpCmode = 0x2;
      Imm = SplatBits >> 8;
      break;
    }
    if ((SplatBits & ~0xff0000) == 0) {
      // Value = 0x00nn0000: Op=x, Cmode=010x.
      OpCmode = 0x4;
      Imm = SplatBits >> 16;
      break;
    }
    if ((SplatBits & ~0xff000000) == 0) {
      // Value = 0xnn000000: Op=x, Cmode=011x.
      OpCmode = 0x6;
      Imm = SplatBits >> 24;
      break;
    }

    // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
    if (type == OtherModImm) return SDValue();

    if ((SplatBits & ~0xffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xff) == 0xff) {
      // Value = 0x0000nnff: Op=x, Cmode=1100.
      OpCmode = 0xc;
      Imm = SplatBits >> 8;
      break;
    }

    if ((SplatBits & ~0xffffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
      // Value = 0x00nnffff: Op=x, Cmode=1101.
      OpCmode = 0xd;
      Imm = SplatBits >> 16;
      break;
    }

    // Note: there are a few 32-bit splat values (specifically: 00ffff00,
    // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
    // VMOV.I32.  A (very) minor optimization would be to replicate the value
    // and fall through here to test for a valid 64-bit splat.  But, then the
    // caller would also need to check and handle the change in size.
    return SDValue();

  case 64: {
    if (type != VMOVModImm)
      return SDValue();
    // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
    uint64_t BitMask = 0xff;
    uint64_t Val = 0;
    unsigned ImmMask = 1;
    Imm = 0;
    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
      if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
        Val |= BitMask;
        Imm |= ImmMask;
      } else if ((SplatBits & BitMask) != 0) {
        return SDValue();
      }
      BitMask <<= 8;
      ImmMask <<= 1;
    }

    if (DAG.getTargetLoweringInfo().isBigEndian())
      // swap higher and lower 32 bit word
      Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);

    // Op=1, Cmode=1110.
    OpCmode = 0x1e;
    VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
    break;
  }

  default:
    llvm_unreachable("unexpected size for isNEONModifiedImm");
  }

  unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
  return DAG.getTargetConstant(EncodedVal, MVT::i32);
}
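// Worked example: the splat value 0x0000ab00 at SplatBitSize == 32 matches
// the "second byte nonzero" pattern above, so it is encoded as
//   OpCmode = 0x2, Imm = 0xab
//   EncodedVal = ARM_AM::createNEONModImm(0x2, 0xab)
// and the caller can materialize it with a single "vmov.i32 d0, #0x0000ab00".
// A value such as 0x12345678 matches no pattern and yields SDValue(), so the
// caller falls back to a constant-pool load or some other expansion.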
SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                                           const ARMSubtarget *ST) const {
  if (!ST->hasVFP3())
    return SDValue();

  bool IsDouble = Op.getValueType() == MVT::f64;
  ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);

  // Try splatting with a VMOV.f32...
  APFloat FPVal = CFP->getValueAPF();
  int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);

  if (ImmVal != -1) {
    if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
      // We have code in place to select a valid ConstantFP already, no need to
      // do any mangling.
      return Op;
    }

    // It's a float and we are trying to use NEON operations where
    // possible.  Lower it to a splat followed by an extract.
    SDLoc DL(Op);
    SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32);
    SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
                                      NewVal);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
                       DAG.getConstant(0, MVT::i32));
  }

  // The rest of our options are NEON only, make sure that's allowed before
  // proceeding.
  if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
    return SDValue();

  EVT VMovVT;
  uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();

  // It wouldn't really be worth bothering for doubles except for one very
  // important value, which does happen to match: 0.0.  So make sure we don't
  // do anything stupid.
  if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
    return SDValue();

  // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
  SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
                                     false, VMOVModImm);
  if (NewVal != SDValue()) {
    SDLoc DL(Op);
    SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
                                      NewVal);
    if (IsDouble)
      return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);

    // It's a float: cast and extract a vector element.
    SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
                                       VecConstant);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
                       DAG.getConstant(0, MVT::i32));
  }

  // Finally, try a VMVN.i32.
  NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, VMovVT,
                             false, VMVNModImm);
  if (NewVal != SDValue()) {
    SDLoc DL(Op);
    SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);

    if (IsDouble)
      return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);

    // It's a float: cast and extract a vector element.
    SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
                                       VecConstant);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
                       DAG.getConstant(0, MVT::i32));
  }

  return SDValue();
}
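// For reference, a sketch of the tiers this function tries for an f32
// constant when NEON is preferred for single precision:
//   1.0f -> getFP32Imm succeeds -> VMOVFPIMM splat + lane 0 extract
//   0.0f -> no VFP immediate, but the bit pattern 0x00000000 is a valid
//           VMOV.i32 modified immediate -> VMOVIMM + bitcast/extract
//   0.1f -> bit pattern 0x3dcccccd fits neither VMOV.i32 nor VMVN.i32,
//           so SDValue() is returned and the constant comes from memory.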
// Check if a VEXT instruction can handle the shuffle mask when the vector
// sources of the shuffle are the same.
static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
  unsigned NumElts = VT.getVectorNumElements();

  // Assume that the first shuffle index is not UNDEF.  Fail if it is.
  if (M[0] < 0)
    return false;

  Imm = M[0];

  // If this is a VEXT shuffle, the immediate value is the index of the first
  // element.  The other shuffle indices must be the successive elements after
  // the first one.
  unsigned ExpectedElt = Imm;
  for (unsigned i = 1; i < NumElts; ++i) {
    // Increment the expected index.  If it wraps around, just follow it
    // back to index zero and keep going.
    ++ExpectedElt;
    if (ExpectedElt == NumElts)
      ExpectedElt = 0;

    if (M[i] < 0) continue; // ignore UNDEF indices
    if (ExpectedElt != static_cast<unsigned>(M[i]))
      return false;
  }

  return true;
}

static bool isVEXTMask(ArrayRef<int> M, EVT VT,
                       bool &ReverseVEXT, unsigned &Imm) {
  unsigned NumElts = VT.getVectorNumElements();
  ReverseVEXT = false;

  // Assume that the first shuffle index is not UNDEF.  Fail if it is.
  if (M[0] < 0)
    return false;

  Imm = M[0];

  // If this is a VEXT shuffle, the immediate value is the index of the first
  // element.  The other shuffle indices must be the successive elements after
  // the first one.
  unsigned ExpectedElt = Imm;
  for (unsigned i = 1; i < NumElts; ++i) {
    // Increment the expected index.  If it wraps around, it may still be
    // a VEXT but the source vectors must be swapped.
    ExpectedElt += 1;
    if (ExpectedElt == NumElts * 2) {
      ExpectedElt = 0;
      ReverseVEXT = true;
    }

    if (M[i] < 0) continue; // ignore UNDEF indices
    if (ExpectedElt != static_cast<unsigned>(M[i]))
      return false;
  }

  // Adjust the index value if the source operands will be swapped.
  if (ReverseVEXT)
    Imm -= NumElts;

  return true;
}

/// isVREVMask - Check if a vector shuffle corresponds to a VREV
/// instruction with the specified blocksize.  (The order of the elements
/// within each block of the vector is reversed.)
static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
  assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
         "Only possible block sizes for VREV are: 16, 32, 64");

  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  unsigned BlockElts = M[0] + 1;
  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSz;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue; // ignore UNDEF indices
    if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
      return false;
  }

  return true;
}

static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
  // We can handle <8 x i8> vector shuffles.  If the index in the mask is out
  // of range, then 0 is placed into the resulting vector.  So pretty much any
  // mask of 8 elements can work here.
  return VT == MVT::v8i8 && M.size() == 8;
}
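// Mask intuition, as a concrete sketch for v8i8:
//   <1,2,3,4,5,6,7,0>      -> singleton VEXT, Imm = 1 (rotate one source)
//   <3,4,5,6,7,8,9,10>     -> VEXT d0,d1,#3 (straddles both sources)
//   <11,12,13,14,15,0,1,2> -> wraps past 2*NumElts, so ReverseVEXT is set,
//                             the sources are swapped, and Imm becomes
//                             11 - 8 = 3.
// VREV masks instead reverse within fixed-size blocks; e.g. for v8i8 the
// VREV32 mask is <3,2,1,0,7,6,5,4>.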
static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
        (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult))
      return false;
  }
  return true;
}

/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i < NumElts; i += 2) {
    if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) ||
        (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult))
      return false;
  }
  return true;
}

static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i != NumElts; ++i) {
    if (M[i] < 0) continue; // ignore UNDEF indices
    if ((unsigned) M[i] != 2 * i + WhichResult)
      return false;
  }

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned Half = VT.getVectorNumElements() / 2;
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned j = 0; j != 2; ++j) {
    unsigned Idx = WhichResult;
    for (unsigned i = 0; i != Half; ++i) {
      int MIdx = M[i + j * Half];
      if (MIdx >= 0 && (unsigned) MIdx != Idx)
        return false;
      Idx += 2;
    }
  }

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
        (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts))
      return false;
    Idx += 1;
  }

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}
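// As a quick reference for v4i16 (one d-register per source), the masks these
// predicates accept are:
//   VTRN result 0: <0, 4, 2, 6>    result 1: <1, 5, 3, 7>
//   VUZP result 0: <0, 2, 4, 6>    result 1: <1, 3, 5, 7>
//   VZIP result 0: <0, 4, 1, 5>    result 1: <2, 6, 3, 7>
// WhichResult selects between the two outputs of the double-result
// instruction; the "_v_undef_" variants match the same patterns with both
// inputs being the same vector.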
/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((M[i] >= 0 && (unsigned) M[i] != Idx) ||
        (M[i+1] >= 0 && (unsigned) M[i+1] != Idx))
      return false;
    Idx += 1;
  }

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// \return true if this is a reverse operation on a vector.
static bool isReverseMask(ArrayRef<int> M, EVT VT) {
  unsigned NumElts = VT.getVectorNumElements();
  // Make sure the mask has the right size.
  if (NumElts != M.size())
    return false;

  // Look for <15, ..., 3, -1, 1, 0>.
  for (unsigned i = 0; i != NumElts; ++i)
    if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
      return false;

  return true;
}

// If N is an integer constant that can be moved into a register in one
// instruction, return an SDValue of such a constant (will become a MOV
// instruction).  Otherwise return null.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
                                     const ARMSubtarget *ST, SDLoc dl) {
  uint64_t Val;
  if (!isa<ConstantSDNode>(N))
    return SDValue();
  Val = cast<ConstantSDNode>(N)->getZExtValue();

  if (ST->isThumb1Only()) {
    if (Val <= 255 || ~Val <= 255)
      return DAG.getConstant(Val, MVT::i32);
  } else {
    if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
      return DAG.getConstant(Val, MVT::i32);
  }
  return SDValue();
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                             const ARMSubtarget *ST) const {
  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      // Check if an immediate VMOV works.
      EVT VmovVT;
      SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, VmovVT, VT.is128BitVector(),
                                      VMOVModImm);
      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Try an immediate VMVN.
      uint64_t NegatedImm = (~SplatBits).getZExtValue();
      Val = isNEONModifiedImm(NegatedImm,
                              SplatUndef.getZExtValue(), SplatBitSize,
                              DAG, VmovVT, VT.is128BitVector(),
                              VMVNModImm);
      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
      if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
        int ImmVal = ARM_AM::getFP32Imm(SplatBits);
        if (ImmVal != -1) {
          SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
          return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
        }
      }
    }
  }

  // Scan through the operands to see if only one value is used.
  //
  // As an optimisation, even if more than one value is used it may be more
  // profitable to splat with one value and then change some lanes.
  //
  // Heuristically we decide to do this if the vector has a "dominant" value,
  // defined as splatted to more than half of the lanes.
  unsigned NumElts = VT.getVectorNumElements();
  bool isOnlyLowElement = true;
  bool usesOnlyOneValue = true;
  bool hasDominantValue = false;
  bool isConstant = true;

  // Map of the number of times a particular SDValue appears in the
  // element list.
  DenseMap<SDValue, unsigned> ValueCounts;
  SDValue Value;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.getOpcode() == ISD::UNDEF)
      continue;
    if (i > 0)
      isOnlyLowElement = false;
    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
      isConstant = false;

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];

    // Is this value dominant? (takes up more than half of the lanes)
    if (++Count > (NumElts / 2)) {
      hasDominantValue = true;
      Value = V;
    }
  }
  if (ValueCounts.size() != 1)
    usesOnlyOneValue = false;
  if (!Value.getNode() && ValueCounts.size() > 0)
    Value = ValueCounts.begin()->first;

  if (ValueCounts.size() == 0)
    return DAG.getUNDEF(VT);

  // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
  // Keep going if we are hitting this case.
  if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();

  // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
  // i32 and try again.
  if (hasDominantValue && EltSize <= 32) {
    if (!isConstant) {
      SDValue N;

      // If we are VDUPing a value that comes directly from a vector, that will
      // cause an unnecessary move to and from a GPR, where instead we could
      // just use VDUPLANE.  We can only do this if the lane being extracted
      // is at a constant index, as the VDUP from lane instructions only have
      // constant-index forms.
      if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          isa<ConstantSDNode>(Value->getOperand(1))) {
        // We need to create a new undef vector to use for the VDUPLANE if the
        // size of the vector from which we get the value is different from the
        // size of the vector that we need to create.  We will insert the
        // element such that the register coalescer will remove unnecessary
        // copies.
        if (VT != Value->getOperand(0).getValueType()) {
          ConstantSDNode *constIndex;
          constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1));
          assert(constIndex && "The index is not a constant!");
          unsigned index = constIndex->getAPIntValue().getLimitedValue() %
                           VT.getVectorNumElements();
          N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
                             Value, DAG.getConstant(index, MVT::i32)),
                 DAG.getConstant(index, MVT::i32));
        } else
          N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                          Value->getOperand(0), Value->getOperand(1));
      } else
        N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);

      if (!usesOnlyOneValue) {
        // The dominant value was splatted as 'N', but we now have to insert
        // all differing elements.
        for (unsigned I = 0; I < NumElts; ++I) {
          if (Op.getOperand(I) == Value)
            continue;
          SmallVector<SDValue, 3> Ops;
          Ops.push_back(N);
          Ops.push_back(Op.getOperand(I));
          Ops.push_back(DAG.getConstant(I, MVT::i32));
          N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
        }
      }
      return N;
    }
    if (VT.getVectorElementType().isFloatingPoint()) {
      SmallVector<SDValue, 8> Ops;
      for (unsigned i = 0; i < NumElts; ++i)
        Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
                                  Op.getOperand(i)));
      EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
      SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, Ops);
      Val = LowerBUILD_VECTOR(Val, DAG, ST);
      if (Val.getNode())
        return DAG.getNode(ISD::BITCAST, dl, VT, Val);
    }
    if (usesOnlyOneValue) {
      SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
      if (isConstant && Val.getNode())
        return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
    }
  }

  // If all elements are constants and the case above didn't get hit, fall back
  // to the default expansion, which will generate a load from the constant
  // pool.
  if (isConstant)
    return SDValue();

  // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
  if (NumElts >= 4) {
    SDValue shuffle = ReconstructShuffle(Op, DAG);
    if (shuffle != SDValue())
      return shuffle;
  }

  // Vectors with 32- or 64-bit elements can be built by directly assigning
  // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
  // will be legalized.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i)
      Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
  // know the default expansion would otherwise fall back on something even
  // worse.  For a vector with one or two non-undef values, that's
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target), and materialization element by element
  // on the stack followed by a load for everything else.
  if (!isConstant && !usesOnlyOneValue) {
    SDValue Vec = DAG.getUNDEF(VT);
    for (unsigned i = 0 ; i < NumElts; ++i) {
      SDValue V = Op.getOperand(i);
      if (V.getOpcode() == ISD::UNDEF)
        continue;
      SDValue LaneIdx = DAG.getConstant(i, MVT::i32);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
    }
    return Vec;
  }

  return SDValue();
}
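// To make the "dominant value" heuristic concrete: building
//   <4 x i32> <%a, %a, %a, %b>
// with non-constant %a and %b takes the hasDominantValue path above (%a fills
// 3 > 4/2 lanes) and becomes, conceptually,
//   %splat = VDUP %a
//   %vec   = INSERT_VECTOR_ELT %splat, %b, 3
// i.e. one vdup plus one lane move, instead of four GPR-to-lane transfers.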
// Gather data to see if the operation can be modelled as a
// shuffle in combination with VEXTs.
SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 2> SourceVecs;
  SmallVector<unsigned, 2> MinElts;
  SmallVector<unsigned, 2> MaxElts;

  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.getOpcode() == ISD::UNDEF)
      continue;
    else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
      // A shuffle can only come from building a vector from various
      // elements of other vectors.
      return SDValue();
    } else if (V.getOperand(0).getValueType().getVectorElementType() !=
               VT.getVectorElementType()) {
      // This code doesn't know how to handle shuffles where the vector
      // element types do not match (this happens because type legalization
      // promotes the return type of EXTRACT_VECTOR_ELT).
      // FIXME: It might be appropriate to extend this code to handle
      // mismatched types.
      return SDValue();
    }

    // Record this extraction against the appropriate vector if possible...
    SDValue SourceVec = V.getOperand(0);
    // If the element number isn't a constant, we can't effectively
    // analyze what's going on.
    if (!isa<ConstantSDNode>(V.getOperand(1)))
      return SDValue();
    unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
    bool FoundSource = false;
    for (unsigned j = 0; j < SourceVecs.size(); ++j) {
      if (SourceVecs[j] == SourceVec) {
        if (MinElts[j] > EltNo)
          MinElts[j] = EltNo;
        if (MaxElts[j] < EltNo)
          MaxElts[j] = EltNo;
        FoundSource = true;
        break;
      }
    }

    // Or record a new source if not...
    if (!FoundSource) {
      SourceVecs.push_back(SourceVec);
      MinElts.push_back(EltNo);
      MaxElts.push_back(EltNo);
    }
  }

  // Currently only do something sane when at most two source vectors
  // are involved.
  if (SourceVecs.size() > 2)
    return SDValue();

  SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) };
  int VEXTOffsets[2] = {0, 0};

  // This loop extracts the usage patterns of the source vectors
  // and prepares appropriate SDValues for a shuffle if possible.
  for (unsigned i = 0; i < SourceVecs.size(); ++i) {
    if (SourceVecs[i].getValueType() == VT) {
      // No VEXT necessary
      ShuffleSrcs[i] = SourceVecs[i];
      VEXTOffsets[i] = 0;
      continue;
    } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) {
      // It probably isn't worth padding out a smaller vector just to
      // break it down again in a shuffle.
      return SDValue();
    }

    // Since only 64-bit and 128-bit vectors are legal on ARM and
    // we've eliminated the other cases...
    assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts &&
           "unexpected vector sizes in ReconstructShuffle");

    if (MaxElts[i] - MinElts[i] >= NumElts) {
      // Span too large for a VEXT to cope
      return SDValue();
    }

    if (MinElts[i] >= NumElts) {
      // The extraction can just take the second half
      VEXTOffsets[i] = NumElts;
      ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
                                   SourceVecs[i],
                                   DAG.getIntPtrConstant(NumElts));
    } else if (MaxElts[i] < NumElts) {
      // The extraction can just take the first half
      VEXTOffsets[i] = 0;
      ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
                                   SourceVecs[i],
                                   DAG.getIntPtrConstant(0));
    } else {
      // An actual VEXT is needed
      VEXTOffsets[i] = MinElts[i];
      SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
                                     SourceVecs[i],
                                     DAG.getIntPtrConstant(0));
      SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
                                     SourceVecs[i],
                                     DAG.getIntPtrConstant(NumElts));
      ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2,
                                   DAG.getConstant(VEXTOffsets[i], MVT::i32));
    }
  }

  SmallVector<int, 8> Mask;

  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue Entry = Op.getOperand(i);
    if (Entry.getOpcode() == ISD::UNDEF) {
      Mask.push_back(-1);
      continue;
    }

    SDValue ExtractVec = Entry.getOperand(0);
    int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i)
                                            .getOperand(1))->getSExtValue();
    if (ExtractVec == SourceVecs[0]) {
      Mask.push_back(ExtractElt - VEXTOffsets[0]);
    } else {
      Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]);
    }
  }

  // Final check before we try to produce nonsense...
  if (isShuffleMaskLegal(Mask, VT))
    return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1],
                                &Mask[0]);

  return SDValue();
}
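// Sketch of the transformation: a v4i16 BUILD_VECTOR whose operands are
// extract_vector_elt of lanes 2..5 of a single v8i16 source straddles both
// halves of that source (MinElts = 2, MaxElts = 5), so the loop above emits
//   VEXT(lo_half, hi_half, #2)
// and the final mask becomes <0,1,2,3> relative to that VEXT, which the
// isShuffleMaskLegal check then accepts.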
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
                                      EVT VT) const {
  if (VT.getVectorNumElements() == 4 &&
      (VT.is128BitVector() || VT.is64BitVector())) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (M[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = M[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return true;
  }

  bool ReverseVEXT;
  unsigned Imm, WhichResult;

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  return (EltSize >= 32 ||
          ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
          isVREVMask(M, VT, 64) ||
          isVREVMask(M, VT, 32) ||
          isVREVMask(M, VT, 16) ||
          isVEXTMask(M, VT, ReverseVEXT, Imm) ||
          isVTBLMask(M, VT) ||
          isVTRNMask(M, VT, WhichResult) ||
          isVUZPMask(M, VT, WhichResult) ||
          isVZIPMask(M, VT, WhichResult) ||
          isVTRN_v_undef_Mask(M, VT, WhichResult) ||
          isVUZP_v_undef_Mask(M, VT, WhichResult) ||
          isVZIP_v_undef_Mask(M, VT, WhichResult) ||
          ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT)));
}
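// The perfect-shuffle table is indexed base 9: each of the four mask entries
// contributes a digit in 0..8, where 8 stands for "undef".  For example the
// mask <2, 6, 3, 7> (one result of a VTRN) maps to
//   PFTableIndex = 2*729 + 6*81 + 3*9 + 7
// and the top two bits of the table entry (PFEntry >> 30) encode the cost,
// roughly the number of NEON operations needed to synthesize the shuffle.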
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      SDLoc dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  enum {
    OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
    OP_VREV,
    OP_VDUP0,
    OP_VDUP1,
    OP_VDUP2,
    OP_VDUP3,
    OP_VEXT1,
    OP_VEXT2,
    OP_VEXT3,
    OP_VUZPL, // VUZP, left result
    OP_VUZPR, // VUZP, right result
    OP_VZIPL, // VZIP, left result
    OP_VZIPR, // VZIP, right result
    OP_VTRNL, // VTRN, left result
    OP_VTRNR  // VTRN, right result
  };

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
  EVT VT = OpLHS.getValueType();

  switch (OpNum) {
  default: llvm_unreachable("Unknown shuffle opcode!");
  case OP_VREV:
    // VREV divides the vector in half and swaps within the half.
    if (VT.getVectorElementType() == MVT::i32 ||
        VT.getVectorElementType() == MVT::f32)
      return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
    // vrev <4 x i16> -> VREV32
    if (VT.getVectorElementType() == MVT::i16)
      return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
    // vrev <4 x i8> -> VREV16
    assert(VT.getVectorElementType() == MVT::i8);
    return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
  case OP_VDUP0:
  case OP_VDUP1:
  case OP_VDUP2:
  case OP_VDUP3:
    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32));
  case OP_VEXT1:
  case OP_VEXT2:
  case OP_VEXT3:
    return DAG.getNode(ARMISD::VEXT, dl, VT,
                       OpLHS, OpRHS,
                       DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32));
  case OP_VUZPL:
  case OP_VUZPR:
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
  case OP_VZIPL:
  case OP_VZIPR:
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
  case OP_VTRNL:
  case OP_VTRNR:
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
  }
}

static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
                                       ArrayRef<int> ShuffleMask,
                                       SelectionDAG &DAG) {
  // Check to see if we can use the VTBL instruction.
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc DL(Op);

  SmallVector<SDValue, 8> VTBLMask;
  for (ArrayRef<int>::iterator
         I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
    VTBLMask.push_back(DAG.getConstant(*I, MVT::i32));

  if (V2.getNode()->getOpcode() == ISD::UNDEF)
    return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, VTBLMask));

  return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, VTBLMask));
}

static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
                                                      SelectionDAG &DAG) {
  SDLoc DL(Op);
  SDValue OpLHS = Op.getOperand(0);
  EVT VT = OpLHS.getValueType();

  assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
         "Expect a v8i16/v16i8 type");
  OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
  // For a v16i8 type: after the VREV64 the lanes hold the original elements
  // <7, ..., 0, 15, ..., 8>.  The VEXT below swaps the two double words,
  // yielding the fully reversed <15, ..., 0>.  The v8i16 case is similar.
  unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
  return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
                     DAG.getConstant(ExtractNum, MVT::i32));
}
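// Usage note: VTBL treats the mask itself as a byte vector, so an arbitrary
// v8i8 shuffle such as <6, 0, 3, 3, 7, 7, 2, 1> simply becomes
//   vtbl.8 d0, {d1}, d2        @ one source; the mask bytes live in d2
// or, with two distinct sources, a VTBL2 over a register pair.  Undef mask
// entries are emitted as -1 constants, whose 0xff bytes index out of range
// and therefore read as zero, which is harmless for don't-care lanes.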
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection.  This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
  // FIXME: floating-point vectors should be canonicalized to integer vectors
  // of the same size so that they get CSEd properly.
  ArrayRef<int> ShuffleMask = SVN->getMask();

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  if (EltSize <= 32) {
    if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
      int Lane = SVN->getSplatIndex();
      // If this is an undef splat, generate it via "just" vdup, if possible.
      if (Lane == -1) Lane = 0;

      // Test if V1 is a SCALAR_TO_VECTOR.
      if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
        return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
      }
      // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
      // (and probably will turn into a SCALAR_TO_VECTOR once legalization
      // reaches it).
      if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
          !isa<ConstantSDNode>(V1.getOperand(0))) {
        bool IsScalarToVector = true;
        for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
          if (V1.getOperand(i).getOpcode() != ISD::UNDEF) {
            IsScalarToVector = false;
            break;
          }
        if (IsScalarToVector)
          return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
      }
      return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
                         DAG.getConstant(Lane, MVT::i32));
    }

    bool ReverseVEXT;
    unsigned Imm;
    if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
      if (ReverseVEXT)
        std::swap(V1, V2);
      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
                         DAG.getConstant(Imm, MVT::i32));
    }

    if (isVREVMask(ShuffleMask, VT, 64))
      return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 32))
      return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
    if (isVREVMask(ShuffleMask, VT, 16))
      return DAG.getNode(ARMISD::VREV16, dl, VT, V1);

    if (V2->getOpcode() == ISD::UNDEF &&
        isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
      return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
                         DAG.getConstant(Imm, MVT::i32));
    }

    // Check for Neon shuffles that modify both input vectors in place.
    // If both results are used, i.e., if there are two shuffles with the same
    // source operands and with masks corresponding to both results of one of
    // these operations, DAG memoization will ensure that a single node is
    // used for both shuffles.
    unsigned WhichResult;
    if (isVTRNMask(ShuffleMask, VT, WhichResult))
      return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                         V1, V2).getValue(WhichResult);
    if (isVUZPMask(ShuffleMask, VT, WhichResult))
      return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                         V1, V2).getValue(WhichResult);
    if (isVZIPMask(ShuffleMask, VT, WhichResult))
      return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                         V1, V2).getValue(WhichResult);

    if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
      return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                         V1, V1).getValue(WhichResult);
    if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
      return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                         V1, V1).getValue(WhichResult);
    if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
      return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                         V1, V1).getValue(WhichResult);
  }

  // If the shuffle is not directly supported and it has 4 elements, use
  // the PerfectShuffle-generated table to synthesize it from other shuffles.
  unsigned NumElts = VT.getVectorNumElements();
  if (NumElts == 4) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (ShuffleMask[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = ShuffleMask[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
      PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
  }

  // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i) {
      if (ShuffleMask[i] < 0)
        Ops.push_back(DAG.getUNDEF(EltVT));
      else
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                  ShuffleMask[i] < (int)NumElts ? V1 : V2,
                                  DAG.getConstant(ShuffleMask[i] & (NumElts-1),
                                                  MVT::i32)));
    }
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT))
    return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);

  if (VT == MVT::v8i8) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG);
    if (NewOp.getNode())
      return NewOp;
  }

  return SDValue();
}

static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  // INSERT_VECTOR_ELT is legal only for immediate indexes.
  SDValue Lane = Op.getOperand(2);
  if (!isa<ConstantSDNode>(Lane))
    return SDValue();

  return Op;
}

static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
  // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
  SDValue Lane = Op.getOperand(1);
  if (!isa<ConstantSDNode>(Lane))
    return SDValue();

  SDValue Vec = Op.getOperand(0);
  if (Op.getValueType() == MVT::i32 &&
      Vec.getValueType().getVectorElementType().getSizeInBits() < 32) {
    SDLoc dl(Op);
    return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
  }

  return Op;
}

static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  // The only time a CONCAT_VECTORS operation can have legal types is when
  // two 64-bit vectors are concatenated to a 128-bit vector.
  assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
         "unexpected CONCAT_VECTORS");
  SDLoc dl(Op);
  SDValue Val = DAG.getUNDEF(MVT::v2f64);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  if (Op0.getOpcode() != ISD::UNDEF)
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
                      DAG.getIntPtrConstant(0));
  if (Op1.getOpcode() != ISD::UNDEF)
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
                      DAG.getIntPtrConstant(1));
  return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
}

/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
/// element has been zero/sign-extended, depending on the isSigned parameter,
/// from an integer type half its size.
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
                                   bool isSigned) {
  // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
  EVT VT = N->getValueType(0);
  if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
    SDNode *BVN = N->getOperand(0).getNode();
    if (BVN->getValueType(0) != MVT::v4i32 ||
        BVN->getOpcode() != ISD::BUILD_VECTOR)
      return false;
    unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0;
    unsigned HiElt = 1 - LoElt;
    ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
    ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
    ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
    ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
    if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
      return false;
    if (isSigned) {
      if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
          Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
        return true;
    } else {
      if (Hi0->isNullValue() && Hi1->isNullValue())
        return true;
    }
    return false;
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDNode *Elt = N->getOperand(i).getNode();
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
      unsigned EltSize = VT.getVectorElementType().getSizeInBits();
      unsigned HalfSize = EltSize / 2;
      if (isSigned) {
        if (!isIntN(HalfSize, C->getSExtValue()))
          return false;
      } else {
        if (!isUIntN(HalfSize, C->getZExtValue()))
          return false;
      }
      continue;
    }
    return false;
  }

  return true;
}

/// isSignExtended - Check if a node is a vector value that is sign-extended
/// or a constant BUILD_VECTOR with sign-extended elements.
static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
    return true;
  if (isExtendedBUILD_VECTOR(N, DAG, true))
    return true;
  return false;
}
/// isZeroExtended - Check if a node is a vector value that is zero-extended
/// or a constant BUILD_VECTOR with zero-extended elements.
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
    return true;
  if (isExtendedBUILD_VECTOR(N, DAG, false))
    return true;
  return false;
}

static EVT getExtensionTo64Bits(const EVT &OrigVT) {
  if (OrigVT.getSizeInBits() >= 64)
    return OrigVT;

  assert(OrigVT.isSimple() && "Expecting a simple value type");

  MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
  switch (OrigSimpleTy) {
  default: llvm_unreachable("Unexpected Vector Type");
  case MVT::v2i8:
  case MVT::v2i16:
    return MVT::v2i32;
  case MVT::v4i8:
    return MVT::v4i16;
  }
}

/// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
/// value size to 64 bits.  We need a 64-bit D register as an operand to VMULL.
/// We insert the required extension here to get the vector to fill a D register.
static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
                                            const EVT &OrigTy,
                                            const EVT &ExtTy,
                                            unsigned ExtOpcode) {
  // The vector originally had a size of OrigTy.  It was then extended to ExtTy.
  // We expect the ExtTy to be 128-bits total.  If the OrigTy is less than
  // 64-bits we need to insert a new extension so that it will be 64-bits.
  assert(ExtTy.is128BitVector() && "Unexpected extension size");
  if (OrigTy.getSizeInBits() >= 64)
    return N;

  // Must extend size to at least 64 bits to be used as an operand for VMULL.
  EVT NewVT = getExtensionTo64Bits(OrigTy);

  return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
}

/// SkipLoadExtensionForVMULL - return a load of the original vector size that
/// does not do any sign/zero extension.  If the original vector is less
/// than 64 bits, an appropriate extension will be added after the load to
/// reach a total size of 64 bits.  We have to add the extension separately
/// because ARM does not have a sign/zero extending load for vectors.
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG &DAG) {
  EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());

  // The load already has the right type.
  if (ExtendedTy == LD->getMemoryVT())
    return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
                       LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(),
                       LD->isNonTemporal(), LD->isInvariant(),
                       LD->getAlignment());

  // We need to create a zextload/sextload.  We cannot just create a load
  // followed by a zext/sext node because LowerMUL is also run during normal
  // operation legalization where we can't create illegal types.
  return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
                        LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
                        LD->getMemoryVT(), LD->isVolatile(),
                        LD->isNonTemporal(), LD->getAlignment());
}
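// Shape of the data flow, as a sketch: for v8i16 = mul (zext v8i8 ...), each
// unextended operand already fills a D register, so SkipExtensionForVMULL
// below just peels the zext.  For a narrower source such as v4i8 the peeled
// value is only 32 bits, so AddRequiredExtensionForVMULL re-extends it to
// v4i16 first; either way VMULL consumes 64-bit operands and produces the
// 128-bit product directly.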
/// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
/// extending load, or BUILD_VECTOR with extended elements, return the
/// unextended value.  The unextended vector should be 64 bits so that it can
/// be used as an operand to a VMULL instruction.  If the original vector size
/// before extension is less than 64 bits we add an extension to resize
/// the vector to 64 bits.
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
  if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
    return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
                                        N->getOperand(0)->getValueType(0),
                                        N->getValueType(0),
                                        N->getOpcode());

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return SkipLoadExtensionForVMULL(LD, DAG);

  // Otherwise, the value must be a BUILD_VECTOR.  For v2i64, it will
  // have been legalized as a BITCAST from v4i32.
  if (N->getOpcode() == ISD::BITCAST) {
    SDNode *BVN = N->getOperand(0).getNode();
    assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
           BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
    unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0;
    return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), MVT::v2i32,
                       BVN->getOperand(LowElt), BVN->getOperand(LowElt+2));
  }
  // Construct a new BUILD_VECTOR with elements truncated to half the size.
  assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
  EVT VT = N->getValueType(0);
  unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
  unsigned NumElts = VT.getVectorNumElements();
  MVT TruncVT = MVT::getIntegerVT(EltSize);
  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i != NumElts; ++i) {
    ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
    const APInt &CInt = C->getAPIntValue();
    // Element types smaller than 32 bits are not legal, so use i32 elements.
    // The values are implicitly truncated so sext vs. zext doesn't matter.
    Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), MVT::i32));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N),
                     MVT::getVectorVT(TruncVT, NumElts), Ops);
}

static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
  }
  return false;
}

static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
  }
  return false;
}
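// These predicates feed the MLA special case in LowerMUL below.  As a sketch
// of the target pattern:
//   (zext v8i8 %a + zext v8i8 %b) * (zext v8i8 %c)
// distributes into two widening multiplies that select to
//   vmull.u8 q0, %a, %c
//   vmlal.u8 q0, %b, %c
// avoiding the stall a vaddl/vmovl/vmul sequence would incur (see the
// comment inside LowerMUL).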
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
  // Multiplications are only custom-lowered for 128-bit vectors so that
  // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
  EVT VT = Op.getValueType();
  assert(VT.is128BitVector() && VT.isInteger() &&
         "unexpected type for custom-lowering ISD::MUL");
  SDNode *N0 = Op.getOperand(0).getNode();
  SDNode *N1 = Op.getOperand(1).getNode();
  unsigned NewOpc = 0;
  bool isMLA = false;
  bool isN0SExt = isSignExtended(N0, DAG);
  bool isN1SExt = isSignExtended(N1, DAG);
  if (isN0SExt && isN1SExt)
    NewOpc = ARMISD::VMULLs;
  else {
    bool isN0ZExt = isZeroExtended(N0, DAG);
    bool isN1ZExt = isZeroExtended(N1, DAG);
    if (isN0ZExt && isN1ZExt)
      NewOpc = ARMISD::VMULLu;
    else if (isN1SExt || isN1ZExt) {
      // Look for (s/zext A + s/zext B) * (s/zext C).  We want to turn these
      // into (s/zext A * s/zext C) + (s/zext B * s/zext C).
      if (isN1SExt && isAddSubSExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLs;
        isMLA = true;
      } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
        std::swap(N0, N1);
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      }
    }

    if (!NewOpc) {
      if (VT == MVT::v2i64)
        // Fall through to expand this.  It is not legal.
        return SDValue();
      else
        // Other vector multiplications are legal.
        return Op;
    }
  }

  // Legalize to a VMULL instruction.
  SDLoc DL(Op);
  SDValue Op0;
  SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
  if (!isMLA) {
    Op0 = SkipExtensionForVMULL(N0, DAG);
    assert(Op0.getValueType().is64BitVector() &&
           Op1.getValueType().is64BitVector() &&
           "unexpected types for extended operands to VMULL");
    return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
  }

  // Optimizing (zext A + zext B) * C to (VMULL A, C) + (VMULL B, C) during
  // isel lowering to take advantage of no-stall back to back vmul + vmla.
  //   vmull q0, d4, d6
  //   vmlal q0, d5, d6
  // is faster than
  //   vaddl q0, d4, d5
  //   vmovl q1, d6
  //   vmul  q0, q0, q1
  SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
  SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
  EVT Op1VT = Op1.getValueType();
  return DAG.getNode(N0->getOpcode(), DL, VT,
                     DAG.getNode(NewOpc, DL, VT,
                                 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
                     DAG.getNode(NewOpc, DL, VT,
                                 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
}

static SDValue
LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG) {
  // Convert to float
  // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
  // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
  X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
  Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
  X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
  Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
  // Get reciprocal estimate.
  // float4 recip = vrecpeq_f32(yf);
  Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                  DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y);
  // Because char has a smaller range than uchar, we can actually get away
  // without any Newton steps.  This requires that we use a weird bias
  // of 0xb000, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
  X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
  Y = DAG.getConstant(0xb000, MVT::i32);
  Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y);
  X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
  // Convert back to short.
  X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
  X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
  return X;
}
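// Design note on the trick above: NEON has no integer vector divide, so the
// narrow divisions approximate 1/y in float.  VRECPE alone gives only a few
// good bits of precision, which is enough for the i8 input range; the 0xb000
// integer bias (found by exhaustive testing, per the comment) compensates the
// estimate error before truncation.  LowerSDIV_v4i16 below needs an extra
// VRECPS refinement step because i16 inputs demand more precision.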
  // float4 yf = vcvt_f32_s32(vmovl_s16(y));
  // float4 xf = vcvt_f32_s32(vmovl_s16(x));
  N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
  N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
  N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);

  // Use reciprocal estimate and one refinement step.
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
                   N1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Because short has a smaller range than ushort, we can actually get away
  // with only a single Newton step. This requires that we use a weird bias
  // of 0x89, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0x89);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  N1 = DAG.getConstant(0x89, MVT::i32);
  N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
  // return vmovn_s32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}

static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::SDIV");

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

  if (VT == MVT::v8i8) {
    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);

    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(4));
    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(4));
    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(0));
    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(0));

    N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
    N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16

    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
    N0 = LowerCONCAT_VECTORS(N0, DAG);

    N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
    return N0;
  }
  return LowerSDIV_v4i16(N0, N1, dl, DAG);
}

static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::UDIV");

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

  if (VT == MVT::v8i8) {
    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
    N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);

    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(4));
    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(4));
    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(0));
    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(0));

    N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
    N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16

    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
    N0 = LowerCONCAT_VECTORS(N0, DAG);

    N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
                     DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32),
                     N0);
    return N0;
  }

  // v4i16 udiv ... Convert to float.
  // float4 yf = vcvt_f32_s32(vmovl_u16(y));
  // float4 xf = vcvt_f32_s32(vmovl_u16(x));
  N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
  N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
  SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);

  // Use reciprocal estimate and two refinement steps.
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
  // recip *= vrecpsq_f32(yf, recip);
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
                   BN1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32),
                   BN1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Simply multiplying by the reciprocal estimate can leave us a few ulps
  // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
  // and that it will never cause us to return an answer too large).
  // float4 result = as_float4(as_int4(xf*recip) + 2);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  N1 = DAG.getConstant(2, MVT::i32);
  N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
6050 // return vmovn_u32(vcvt_s32_f32(result)); 6051 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 6052 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 6053 return N0; 6054 } 6055 6056 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 6057 EVT VT = Op.getNode()->getValueType(0); 6058 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 6059 6060 unsigned Opc; 6061 bool ExtraOp = false; 6062 switch (Op.getOpcode()) { 6063 default: llvm_unreachable("Invalid code"); 6064 case ISD::ADDC: Opc = ARMISD::ADDC; break; 6065 case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break; 6066 case ISD::SUBC: Opc = ARMISD::SUBC; break; 6067 case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break; 6068 } 6069 6070 if (!ExtraOp) 6071 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), 6072 Op.getOperand(1)); 6073 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), 6074 Op.getOperand(1), Op.getOperand(2)); 6075 } 6076 6077 SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { 6078 assert(Subtarget->isTargetDarwin()); 6079 6080 // For iOS, we want to call an alternative entry point: __sincos_stret, 6081 // return values are passed via sret. 6082 SDLoc dl(Op); 6083 SDValue Arg = Op.getOperand(0); 6084 EVT ArgVT = Arg.getValueType(); 6085 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 6086 6087 MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); 6088 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 6089 6090 // Pair of floats / doubles used to pass the result. 6091 StructType *RetTy = StructType::get(ArgTy, ArgTy, NULL); 6092 6093 // Create stack object for sret. 6094 const uint64_t ByteSize = TLI.getDataLayout()->getTypeAllocSize(RetTy); 6095 const unsigned StackAlign = TLI.getDataLayout()->getPrefTypeAlignment(RetTy); 6096 int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false); 6097 SDValue SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy()); 6098 6099 ArgListTy Args; 6100 ArgListEntry Entry; 6101 6102 Entry.Node = SRet; 6103 Entry.Ty = RetTy->getPointerTo(); 6104 Entry.isSExt = false; 6105 Entry.isZExt = false; 6106 Entry.isSRet = true; 6107 Args.push_back(Entry); 6108 6109 Entry.Node = Arg; 6110 Entry.Ty = ArgTy; 6111 Entry.isSExt = false; 6112 Entry.isZExt = false; 6113 Args.push_back(Entry); 6114 6115 const char *LibcallName = (ArgVT == MVT::f64) 6116 ? "__sincos_stret" : "__sincosf_stret"; 6117 SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy()); 6118 6119 TargetLowering::CallLoweringInfo CLI(DAG); 6120 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode()) 6121 .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()), Callee, 6122 std::move(Args), 0) 6123 .setDiscardResult(); 6124 6125 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 6126 6127 SDValue LoadSin = DAG.getLoad(ArgVT, dl, CallResult.second, SRet, 6128 MachinePointerInfo(), false, false, false, 0); 6129 6130 // Address of cos field. 
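  // (The sret struct is { sin, cos }: sin is loaded from offset 0 above and
  // cos from the following field, ArgVT.getStoreSize() bytes in.)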
6131 SDValue Add = DAG.getNode(ISD::ADD, dl, getPointerTy(), SRet, 6132 DAG.getIntPtrConstant(ArgVT.getStoreSize())); 6133 SDValue LoadCos = DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, 6134 MachinePointerInfo(), false, false, false, 0); 6135 6136 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); 6137 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, 6138 LoadSin.getValue(0), LoadCos.getValue(0)); 6139 } 6140 6141 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { 6142 // Monotonic load/store is legal for all targets 6143 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic) 6144 return Op; 6145 6146 // Acquire/Release load/store is not legal for targets without a 6147 // dmb or equivalent available. 6148 return SDValue(); 6149 } 6150 6151 static void ReplaceREADCYCLECOUNTER(SDNode *N, 6152 SmallVectorImpl<SDValue> &Results, 6153 SelectionDAG &DAG, 6154 const ARMSubtarget *Subtarget) { 6155 SDLoc DL(N); 6156 SDValue Cycles32, OutChain; 6157 6158 if (Subtarget->hasPerfMon()) { 6159 // Under Power Management extensions, the cycle-count is: 6160 // mrc p15, #0, <Rt>, c9, c13, #0 6161 SDValue Ops[] = { N->getOperand(0), // Chain 6162 DAG.getConstant(Intrinsic::arm_mrc, MVT::i32), 6163 DAG.getConstant(15, MVT::i32), 6164 DAG.getConstant(0, MVT::i32), 6165 DAG.getConstant(9, MVT::i32), 6166 DAG.getConstant(13, MVT::i32), 6167 DAG.getConstant(0, MVT::i32) 6168 }; 6169 6170 Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, 6171 DAG.getVTList(MVT::i32, MVT::Other), Ops); 6172 OutChain = Cycles32.getValue(1); 6173 } else { 6174 // Intrinsic is defined to return 0 on unsupported platforms. Technically 6175 // there are older ARM CPUs that have implementation-specific ways of 6176 // obtaining this information (FIXME!). 6177 Cycles32 = DAG.getConstant(0, MVT::i32); 6178 OutChain = DAG.getEntryNode(); 6179 } 6180 6181 6182 SDValue Cycles64 = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, 6183 Cycles32, DAG.getConstant(0, MVT::i32)); 6184 Results.push_back(Cycles64); 6185 Results.push_back(OutChain); 6186 } 6187 6188 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 6189 switch (Op.getOpcode()) { 6190 default: llvm_unreachable("Don't know how to custom lower this!"); 6191 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 6192 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 6193 case ISD::GlobalAddress: 6194 switch (Subtarget->getTargetTriple().getObjectFormat()) { 6195 default: llvm_unreachable("unknown object format"); 6196 case Triple::COFF: 6197 return LowerGlobalAddressWindows(Op, DAG); 6198 case Triple::ELF: 6199 return LowerGlobalAddressELF(Op, DAG); 6200 case Triple::MachO: 6201 return LowerGlobalAddressDarwin(Op, DAG); 6202 } 6203 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 6204 case ISD::SELECT: return LowerSELECT(Op, DAG); 6205 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 6206 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 6207 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 6208 case ISD::VASTART: return LowerVASTART(Op, DAG); 6209 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 6210 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 6211 case ISD::SINT_TO_FP: 6212 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 6213 case ISD::FP_TO_SINT: 6214 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 6215 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 6216 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 6217 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, 
DAG); 6218 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 6219 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 6220 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 6221 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 6222 Subtarget); 6223 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 6224 case ISD::SHL: 6225 case ISD::SRL: 6226 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 6227 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 6228 case ISD::SRL_PARTS: 6229 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 6230 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 6231 case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget); 6232 case ISD::SETCC: return LowerVSETCC(Op, DAG); 6233 case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); 6234 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 6235 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 6236 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 6237 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 6238 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 6239 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 6240 case ISD::MUL: return LowerMUL(Op, DAG); 6241 case ISD::SDIV: return LowerSDIV(Op, DAG); 6242 case ISD::UDIV: return LowerUDIV(Op, DAG); 6243 case ISD::ADDC: 6244 case ISD::ADDE: 6245 case ISD::SUBC: 6246 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 6247 case ISD::SADDO: 6248 case ISD::UADDO: 6249 case ISD::SSUBO: 6250 case ISD::USUBO: 6251 return LowerXALUO(Op, DAG); 6252 case ISD::ATOMIC_LOAD: 6253 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 6254 case ISD::FSINCOS: return LowerFSINCOS(Op, DAG); 6255 case ISD::SDIVREM: 6256 case ISD::UDIVREM: return LowerDivRem(Op, DAG); 6257 case ISD::DYNAMIC_STACKALLOC: 6258 if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment()) 6259 return LowerDYNAMIC_STACKALLOC(Op, DAG); 6260 llvm_unreachable("Don't know how to custom lower this!"); 6261 } 6262 } 6263 6264 /// ReplaceNodeResults - Replace the results of node with an illegal result 6265 /// type with new values built out of custom code. 6266 void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 6267 SmallVectorImpl<SDValue>&Results, 6268 SelectionDAG &DAG) const { 6269 SDValue Res; 6270 switch (N->getOpcode()) { 6271 default: 6272 llvm_unreachable("Don't know how to custom expand this!"); 6273 case ISD::BITCAST: 6274 Res = ExpandBITCAST(N, DAG); 6275 break; 6276 case ISD::SRL: 6277 case ISD::SRA: 6278 Res = Expand64BitShift(N, DAG, Subtarget); 6279 break; 6280 case ISD::READCYCLECOUNTER: 6281 ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget); 6282 return; 6283 } 6284 if (Res.getNode()) 6285 Results.push_back(Res); 6286 } 6287 6288 //===----------------------------------------------------------------------===// 6289 // ARM Scheduler Hooks 6290 //===----------------------------------------------------------------------===// 6291 6292 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 6293 /// registers the function context. 
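/// The address of the dispatch block is materialized PC-relatively (with the
/// low bit set when targeting Thumb, as the blocks below note) and stored into
/// the saved-PC slot of the jump buffer (&jbuf[1], at byte offset 36 of the
/// function context), so that a longjmp through the context resumes execution
/// at the dispatch code.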
6294 void ARMTargetLowering:: 6295 SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB, 6296 MachineBasicBlock *DispatchBB, int FI) const { 6297 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6298 DebugLoc dl = MI->getDebugLoc(); 6299 MachineFunction *MF = MBB->getParent(); 6300 MachineRegisterInfo *MRI = &MF->getRegInfo(); 6301 MachineConstantPool *MCP = MF->getConstantPool(); 6302 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 6303 const Function *F = MF->getFunction(); 6304 6305 bool isThumb = Subtarget->isThumb(); 6306 bool isThumb2 = Subtarget->isThumb2(); 6307 6308 unsigned PCLabelId = AFI->createPICLabelUId(); 6309 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8; 6310 ARMConstantPoolValue *CPV = 6311 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); 6312 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); 6313 6314 const TargetRegisterClass *TRC = isThumb ? 6315 (const TargetRegisterClass*)&ARM::tGPRRegClass : 6316 (const TargetRegisterClass*)&ARM::GPRRegClass; 6317 6318 // Grab constant pool and fixed stack memory operands. 6319 MachineMemOperand *CPMMO = 6320 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(), 6321 MachineMemOperand::MOLoad, 4, 4); 6322 6323 MachineMemOperand *FIMMOSt = 6324 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 6325 MachineMemOperand::MOStore, 4, 4); 6326 6327 // Load the address of the dispatch MBB into the jump buffer. 6328 if (isThumb2) { 6329 // Incoming value: jbuf 6330 // ldr.n r5, LCPI1_1 6331 // orr r5, r5, #1 6332 // add r5, pc 6333 // str r5, [$jbuf, #+4] ; &jbuf[1] 6334 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6335 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) 6336 .addConstantPoolIndex(CPI) 6337 .addMemOperand(CPMMO)); 6338 // Set the low bit because of thumb mode. 6339 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6340 AddDefaultCC( 6341 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) 6342 .addReg(NewVReg1, RegState::Kill) 6343 .addImm(0x01))); 6344 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6345 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) 6346 .addReg(NewVReg2, RegState::Kill) 6347 .addImm(PCLabelId); 6348 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) 6349 .addReg(NewVReg3, RegState::Kill) 6350 .addFrameIndex(FI) 6351 .addImm(36) // &jbuf[1] :: pc 6352 .addMemOperand(FIMMOSt)); 6353 } else if (isThumb) { 6354 // Incoming value: jbuf 6355 // ldr.n r1, LCPI1_4 6356 // add r1, pc 6357 // mov r2, #1 6358 // orrs r1, r2 6359 // add r2, $jbuf, #+4 ; &jbuf[1] 6360 // str r1, [r2] 6361 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6362 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) 6363 .addConstantPoolIndex(CPI) 6364 .addMemOperand(CPMMO)); 6365 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6366 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) 6367 .addReg(NewVReg1, RegState::Kill) 6368 .addImm(PCLabelId); 6369 // Set the low bit because of thumb mode. 
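    // (Bit 0 of a branch target selects the instruction set; leaving it set
    // keeps the resumed code executing as Thumb.)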
6370 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6371 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) 6372 .addReg(ARM::CPSR, RegState::Define) 6373 .addImm(1)); 6374 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6375 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) 6376 .addReg(ARM::CPSR, RegState::Define) 6377 .addReg(NewVReg2, RegState::Kill) 6378 .addReg(NewVReg3, RegState::Kill)); 6379 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6380 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tADDrSPi), NewVReg5) 6381 .addFrameIndex(FI) 6382 .addImm(36)); // &jbuf[1] :: pc 6383 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) 6384 .addReg(NewVReg4, RegState::Kill) 6385 .addReg(NewVReg5, RegState::Kill) 6386 .addImm(0) 6387 .addMemOperand(FIMMOSt)); 6388 } else { 6389 // Incoming value: jbuf 6390 // ldr r1, LCPI1_1 6391 // add r1, pc, r1 6392 // str r1, [$jbuf, #+4] ; &jbuf[1] 6393 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6394 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) 6395 .addConstantPoolIndex(CPI) 6396 .addImm(0) 6397 .addMemOperand(CPMMO)); 6398 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6399 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) 6400 .addReg(NewVReg1, RegState::Kill) 6401 .addImm(PCLabelId)); 6402 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) 6403 .addReg(NewVReg2, RegState::Kill) 6404 .addFrameIndex(FI) 6405 .addImm(36) // &jbuf[1] :: pc 6406 .addMemOperand(FIMMOSt)); 6407 } 6408 } 6409 6410 MachineBasicBlock *ARMTargetLowering:: 6411 EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { 6412 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6413 DebugLoc dl = MI->getDebugLoc(); 6414 MachineFunction *MF = MBB->getParent(); 6415 MachineRegisterInfo *MRI = &MF->getRegInfo(); 6416 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 6417 MachineFrameInfo *MFI = MF->getFrameInfo(); 6418 int FI = MFI->getFunctionContextIndex(); 6419 6420 const TargetRegisterClass *TRC = Subtarget->isThumb() ? 6421 (const TargetRegisterClass*)&ARM::tGPRRegClass : 6422 (const TargetRegisterClass*)&ARM::GPRnopcRegClass; 6423 6424 // Get a mapping of the call site numbers to all of the landing pads they're 6425 // associated with. 6426 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad; 6427 unsigned MaxCSNum = 0; 6428 MachineModuleInfo &MMI = MF->getMMI(); 6429 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; 6430 ++BB) { 6431 if (!BB->isLandingPad()) continue; 6432 6433 // FIXME: We should assert that the EH_LABEL is the first MI in the landing 6434 // pad. 6435 for (MachineBasicBlock::iterator 6436 II = BB->begin(), IE = BB->end(); II != IE; ++II) { 6437 if (!II->isEHLabel()) continue; 6438 6439 MCSymbol *Sym = II->getOperand(0).getMCSymbol(); 6440 if (!MMI.hasCallSiteLandingPad(Sym)) continue; 6441 6442 SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym); 6443 for (SmallVectorImpl<unsigned>::iterator 6444 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); 6445 CSI != CSE; ++CSI) { 6446 CallSiteNumToLPad[*CSI].push_back(BB); 6447 MaxCSNum = std::max(MaxCSNum, *CSI); 6448 } 6449 break; 6450 } 6451 } 6452 6453 // Get an ordered list of the machine basic blocks for the jump table. 
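  // Call-site numbering starts at 1, so 0-based jump-table entry I holds the
  // landing pads recorded for call site I+1; the same landing pad may appear
  // more than once when several call sites share it.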
  std::vector<MachineBasicBlock*> LPadList;
  SmallPtrSet<MachineBasicBlock*, 64> InvokeBBs;
  LPadList.reserve(CallSiteNumToLPad.size());
  for (unsigned I = 1; I <= MaxCSNum; ++I) {
    SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
    for (SmallVectorImpl<MachineBasicBlock*>::iterator
         II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
      LPadList.push_back(*II);
      InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
    }
  }

  assert(!LPadList.empty() &&
         "No landing pad destinations for the dispatch jump table!");

  // Create the jump table and associated information.
  MachineJumpTableInfo *JTI =
    MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
  unsigned MJTI = JTI->createJumpTableIndex(LPadList);
  unsigned UId = AFI->createJumpTableUId();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();

  // Create the MBBs for the dispatch code.

  // Shove the dispatch's address into the return slot in the function context.
  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
  DispatchBB->setIsLandingPad();

  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  unsigned trap_opcode;
  if (Subtarget->isThumb())
    trap_opcode = ARM::tTRAP;
  else
    trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;

  BuildMI(TrapBB, dl, TII->get(trap_opcode));
  DispatchBB->addSuccessor(TrapBB);

  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
  DispatchBB->addSuccessor(DispContBB);

  // Insert the MBBs.
  MF->insert(MF->end(), DispatchBB);
  MF->insert(MF->end(), DispContBB);
  MF->insert(MF->end(), TrapBB);

  // Insert code into the entry block that creates and registers the function
  // context.
  SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);

  MachineMemOperand *FIMMOLd =
    MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                             MachineMemOperand::MOLoad |
                             MachineMemOperand::MOVolatile, 4, 4);

  MachineInstrBuilder MIB;
  MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));

  const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
  const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();

  // Add a register mask with no preserved registers. This results in all
  // registers being marked as clobbered.
6517 MIB.addRegMask(RI.getNoPreservedMask()); 6518 6519 unsigned NumLPads = LPadList.size(); 6520 if (Subtarget->isThumb2()) { 6521 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6522 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 6523 .addFrameIndex(FI) 6524 .addImm(4) 6525 .addMemOperand(FIMMOLd)); 6526 6527 if (NumLPads < 256) { 6528 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 6529 .addReg(NewVReg1) 6530 .addImm(LPadList.size())); 6531 } else { 6532 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6533 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) 6534 .addImm(NumLPads & 0xFFFF)); 6535 6536 unsigned VReg2 = VReg1; 6537 if ((NumLPads & 0xFFFF0000) != 0) { 6538 VReg2 = MRI->createVirtualRegister(TRC); 6539 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) 6540 .addReg(VReg1) 6541 .addImm(NumLPads >> 16)); 6542 } 6543 6544 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) 6545 .addReg(NewVReg1) 6546 .addReg(VReg2)); 6547 } 6548 6549 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 6550 .addMBB(TrapBB) 6551 .addImm(ARMCC::HI) 6552 .addReg(ARM::CPSR); 6553 6554 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6555 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3) 6556 .addJumpTableIndex(MJTI) 6557 .addImm(UId)); 6558 6559 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6560 AddDefaultCC( 6561 AddDefaultPred( 6562 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) 6563 .addReg(NewVReg3, RegState::Kill) 6564 .addReg(NewVReg1) 6565 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 6566 6567 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) 6568 .addReg(NewVReg4, RegState::Kill) 6569 .addReg(NewVReg1) 6570 .addJumpTableIndex(MJTI) 6571 .addImm(UId); 6572 } else if (Subtarget->isThumb()) { 6573 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6574 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 6575 .addFrameIndex(FI) 6576 .addImm(1) 6577 .addMemOperand(FIMMOLd)); 6578 6579 if (NumLPads < 256) { 6580 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 6581 .addReg(NewVReg1) 6582 .addImm(NumLPads)); 6583 } else { 6584 MachineConstantPool *ConstantPool = MF->getConstantPool(); 6585 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 6586 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 6587 6588 // MachineConstantPool wants an explicit alignment. 
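      // (A preferred alignment of 0 means the target expresses no preference
      // for this type, so fall back to the type's allocation size.)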
6589 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 6590 if (Align == 0) 6591 Align = getDataLayout()->getTypeAllocSize(C->getType()); 6592 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 6593 6594 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6595 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) 6596 .addReg(VReg1, RegState::Define) 6597 .addConstantPoolIndex(Idx)); 6598 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) 6599 .addReg(NewVReg1) 6600 .addReg(VReg1)); 6601 } 6602 6603 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 6604 .addMBB(TrapBB) 6605 .addImm(ARMCC::HI) 6606 .addReg(ARM::CPSR); 6607 6608 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6609 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 6610 .addReg(ARM::CPSR, RegState::Define) 6611 .addReg(NewVReg1) 6612 .addImm(2)); 6613 6614 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6615 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 6616 .addJumpTableIndex(MJTI) 6617 .addImm(UId)); 6618 6619 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6620 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 6621 .addReg(ARM::CPSR, RegState::Define) 6622 .addReg(NewVReg2, RegState::Kill) 6623 .addReg(NewVReg3)); 6624 6625 MachineMemOperand *JTMMOLd = 6626 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 6627 MachineMemOperand::MOLoad, 4, 4); 6628 6629 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6630 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 6631 .addReg(NewVReg4, RegState::Kill) 6632 .addImm(0) 6633 .addMemOperand(JTMMOLd)); 6634 6635 unsigned NewVReg6 = NewVReg5; 6636 if (RelocM == Reloc::PIC_) { 6637 NewVReg6 = MRI->createVirtualRegister(TRC); 6638 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 6639 .addReg(ARM::CPSR, RegState::Define) 6640 .addReg(NewVReg5, RegState::Kill) 6641 .addReg(NewVReg3)); 6642 } 6643 6644 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 6645 .addReg(NewVReg6, RegState::Kill) 6646 .addJumpTableIndex(MJTI) 6647 .addImm(UId); 6648 } else { 6649 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6650 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 6651 .addFrameIndex(FI) 6652 .addImm(4) 6653 .addMemOperand(FIMMOLd)); 6654 6655 if (NumLPads < 256) { 6656 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) 6657 .addReg(NewVReg1) 6658 .addImm(NumLPads)); 6659 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 6660 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6661 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) 6662 .addImm(NumLPads & 0xFFFF)); 6663 6664 unsigned VReg2 = VReg1; 6665 if ((NumLPads & 0xFFFF0000) != 0) { 6666 VReg2 = MRI->createVirtualRegister(TRC); 6667 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) 6668 .addReg(VReg1) 6669 .addImm(NumLPads >> 16)); 6670 } 6671 6672 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 6673 .addReg(NewVReg1) 6674 .addReg(VReg2)); 6675 } else { 6676 MachineConstantPool *ConstantPool = MF->getConstantPool(); 6677 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 6678 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 6679 6680 // MachineConstantPool wants an explicit alignment. 
6681 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 6682 if (Align == 0) 6683 Align = getDataLayout()->getTypeAllocSize(C->getType()); 6684 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 6685 6686 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6687 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) 6688 .addReg(VReg1, RegState::Define) 6689 .addConstantPoolIndex(Idx) 6690 .addImm(0)); 6691 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 6692 .addReg(NewVReg1) 6693 .addReg(VReg1, RegState::Kill)); 6694 } 6695 6696 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 6697 .addMBB(TrapBB) 6698 .addImm(ARMCC::HI) 6699 .addReg(ARM::CPSR); 6700 6701 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6702 AddDefaultCC( 6703 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) 6704 .addReg(NewVReg1) 6705 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 6706 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6707 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) 6708 .addJumpTableIndex(MJTI) 6709 .addImm(UId)); 6710 6711 MachineMemOperand *JTMMOLd = 6712 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 6713 MachineMemOperand::MOLoad, 4, 4); 6714 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6715 AddDefaultPred( 6716 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) 6717 .addReg(NewVReg3, RegState::Kill) 6718 .addReg(NewVReg4) 6719 .addImm(0) 6720 .addMemOperand(JTMMOLd)); 6721 6722 if (RelocM == Reloc::PIC_) { 6723 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 6724 .addReg(NewVReg5, RegState::Kill) 6725 .addReg(NewVReg4) 6726 .addJumpTableIndex(MJTI) 6727 .addImm(UId); 6728 } else { 6729 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr)) 6730 .addReg(NewVReg5, RegState::Kill) 6731 .addJumpTableIndex(MJTI) 6732 .addImm(UId); 6733 } 6734 } 6735 6736 // Add the jump table entries as successors to the MBB. 6737 SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; 6738 for (std::vector<MachineBasicBlock*>::iterator 6739 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { 6740 MachineBasicBlock *CurMBB = *I; 6741 if (SeenMBBs.insert(CurMBB)) 6742 DispContBB->addSuccessor(CurMBB); 6743 } 6744 6745 // N.B. the order the invoke BBs are processed in doesn't matter here. 6746 const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF); 6747 SmallVector<MachineBasicBlock*, 64> MBBLPads; 6748 for (SmallPtrSet<MachineBasicBlock*, 64>::iterator 6749 I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) { 6750 MachineBasicBlock *BB = *I; 6751 6752 // Remove the landing pad successor from the invoke block and replace it 6753 // with the new dispatch block. 6754 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(), 6755 BB->succ_end()); 6756 while (!Successors.empty()) { 6757 MachineBasicBlock *SMBB = Successors.pop_back_val(); 6758 if (SMBB->isLandingPad()) { 6759 BB->removeSuccessor(SMBB); 6760 MBBLPads.push_back(SMBB); 6761 } 6762 } 6763 6764 BB->addSuccessor(DispatchBB); 6765 6766 // Find the invoke call and mark all of the callee-saved registers as 6767 // 'implicit defined' so that they're spilled. This prevents code from 6768 // moving instructions to before the EH block, where they will never be 6769 // executed. 
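    // (e.g., a value computed after the call could otherwise be hoisted above
    // it and then be lost when the exceptional edge is taken.)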
6770 for (MachineBasicBlock::reverse_iterator 6771 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { 6772 if (!II->isCall()) continue; 6773 6774 DenseMap<unsigned, bool> DefRegs; 6775 for (MachineInstr::mop_iterator 6776 OI = II->operands_begin(), OE = II->operands_end(); 6777 OI != OE; ++OI) { 6778 if (!OI->isReg()) continue; 6779 DefRegs[OI->getReg()] = true; 6780 } 6781 6782 MachineInstrBuilder MIB(*MF, &*II); 6783 6784 for (unsigned i = 0; SavedRegs[i] != 0; ++i) { 6785 unsigned Reg = SavedRegs[i]; 6786 if (Subtarget->isThumb2() && 6787 !ARM::tGPRRegClass.contains(Reg) && 6788 !ARM::hGPRRegClass.contains(Reg)) 6789 continue; 6790 if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) 6791 continue; 6792 if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) 6793 continue; 6794 if (!DefRegs[Reg]) 6795 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); 6796 } 6797 6798 break; 6799 } 6800 } 6801 6802 // Mark all former landing pads as non-landing pads. The dispatch is the only 6803 // landing pad now. 6804 for (SmallVectorImpl<MachineBasicBlock*>::iterator 6805 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I) 6806 (*I)->setIsLandingPad(false); 6807 6808 // The instruction is gone now. 6809 MI->eraseFromParent(); 6810 6811 return MBB; 6812 } 6813 6814 static 6815 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 6816 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 6817 E = MBB->succ_end(); I != E; ++I) 6818 if (*I != Succ) 6819 return *I; 6820 llvm_unreachable("Expecting a BB with two successors!"); 6821 } 6822 6823 /// Return the load opcode for a given load size. If load size >= 8, 6824 /// neon opcode will be returned. 6825 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { 6826 if (LdSize >= 8) 6827 return LdSize == 16 ? ARM::VLD1q32wb_fixed 6828 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0; 6829 if (IsThumb1) 6830 return LdSize == 4 ? ARM::tLDRi 6831 : LdSize == 2 ? ARM::tLDRHi 6832 : LdSize == 1 ? ARM::tLDRBi : 0; 6833 if (IsThumb2) 6834 return LdSize == 4 ? ARM::t2LDR_POST 6835 : LdSize == 2 ? ARM::t2LDRH_POST 6836 : LdSize == 1 ? ARM::t2LDRB_POST : 0; 6837 return LdSize == 4 ? ARM::LDR_POST_IMM 6838 : LdSize == 2 ? ARM::LDRH_POST 6839 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0; 6840 } 6841 6842 /// Return the store opcode for a given store size. If store size >= 8, 6843 /// neon opcode will be returned. 6844 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { 6845 if (StSize >= 8) 6846 return StSize == 16 ? ARM::VST1q32wb_fixed 6847 : StSize == 8 ? ARM::VST1d32wb_fixed : 0; 6848 if (IsThumb1) 6849 return StSize == 4 ? ARM::tSTRi 6850 : StSize == 2 ? ARM::tSTRHi 6851 : StSize == 1 ? ARM::tSTRBi : 0; 6852 if (IsThumb2) 6853 return StSize == 4 ? ARM::t2STR_POST 6854 : StSize == 2 ? ARM::t2STRH_POST 6855 : StSize == 1 ? ARM::t2STRB_POST : 0; 6856 return StSize == 4 ? ARM::STR_POST_IMM 6857 : StSize == 2 ? ARM::STRH_POST 6858 : StSize == 1 ? ARM::STRB_POST_IMM : 0; 6859 } 6860 6861 /// Emit a post-increment load operation with given size. The instructions 6862 /// will be added to BB at Pos. 
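/// For example, LdSize == 4 in ARM mode selects LDR_POST_IMM, i.e. roughly
/// "ldr Data, [AddrIn], #4" with AddrOut defined to the incremented address;
/// Thumb1 has no post-increment loads, so a plain load is followed by a
/// separate tADDi8 that advances the address by LdSize.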
static void emitPostLd(MachineBasicBlock *BB, MachineInstr *Pos,
                       const TargetInstrInfo *TII, DebugLoc dl,
                       unsigned LdSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
  unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
  assert(LdOpc != 0 && "Should have a load opcode");
  if (LdSize >= 8) {
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
                   .addReg(AddrOut, RegState::Define).addReg(AddrIn)
                   .addImm(0));
  } else if (IsThumb1) {
    // load + update AddrIn
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
                   .addReg(AddrIn).addImm(0));
    MachineInstrBuilder MIB =
      BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
    MIB = AddDefaultT1CC(MIB);
    MIB.addReg(AddrIn).addImm(LdSize);
    AddDefaultPred(MIB);
  } else if (IsThumb2) {
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
                   .addReg(AddrOut, RegState::Define).addReg(AddrIn)
                   .addImm(LdSize));
  } else { // arm
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
                   .addReg(AddrOut, RegState::Define).addReg(AddrIn)
                   .addReg(0).addImm(LdSize));
  }
}

/// Emit a post-increment store operation with given size. The instructions
/// will be added to BB at Pos.
static void emitPostSt(MachineBasicBlock *BB, MachineInstr *Pos,
                       const TargetInstrInfo *TII, DebugLoc dl,
                       unsigned StSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
  unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
  assert(StOpc != 0 && "Should have a store opcode");
  if (StSize >= 8) {
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
                   .addReg(AddrIn).addImm(0).addReg(Data));
  } else if (IsThumb1) {
    // store + update AddrIn
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc)).addReg(Data)
                   .addReg(AddrIn).addImm(0));
    MachineInstrBuilder MIB =
      BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
    MIB = AddDefaultT1CC(MIB);
    MIB.addReg(AddrIn).addImm(StSize);
    AddDefaultPred(MIB);
  } else if (IsThumb2) {
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
                   .addReg(Data).addReg(AddrIn).addImm(StSize));
  } else { // arm
    AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
                   .addReg(Data).addReg(AddrIn).addReg(0)
                   .addImm(StSize));
  }
}

MachineBasicBlock *
ARMTargetLowering::EmitStructByval(MachineInstr *MI,
                                   MachineBasicBlock *BB) const {
  // This pseudo instruction has 4 operands: dst, src, size, alignment.
  // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
  // Otherwise, we will generate unrolled scalar copies.
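  // Illustrative example (assuming NEON is usable here): a 20-byte copy with
  // 16-byte alignment gets UnitSize == 16, so the main copy is a single
  // VLD1/VST1 pair and the remaining 4 bytes go through the byte-sized
  // LDRB/STRB epilogue below.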
6929 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6930 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6931 MachineFunction::iterator It = BB; 6932 ++It; 6933 6934 unsigned dest = MI->getOperand(0).getReg(); 6935 unsigned src = MI->getOperand(1).getReg(); 6936 unsigned SizeVal = MI->getOperand(2).getImm(); 6937 unsigned Align = MI->getOperand(3).getImm(); 6938 DebugLoc dl = MI->getDebugLoc(); 6939 6940 MachineFunction *MF = BB->getParent(); 6941 MachineRegisterInfo &MRI = MF->getRegInfo(); 6942 unsigned UnitSize = 0; 6943 const TargetRegisterClass *TRC = nullptr; 6944 const TargetRegisterClass *VecTRC = nullptr; 6945 6946 bool IsThumb1 = Subtarget->isThumb1Only(); 6947 bool IsThumb2 = Subtarget->isThumb2(); 6948 6949 if (Align & 1) { 6950 UnitSize = 1; 6951 } else if (Align & 2) { 6952 UnitSize = 2; 6953 } else { 6954 // Check whether we can use NEON instructions. 6955 if (!MF->getFunction()->getAttributes(). 6956 hasAttribute(AttributeSet::FunctionIndex, 6957 Attribute::NoImplicitFloat) && 6958 Subtarget->hasNEON()) { 6959 if ((Align % 16 == 0) && SizeVal >= 16) 6960 UnitSize = 16; 6961 else if ((Align % 8 == 0) && SizeVal >= 8) 6962 UnitSize = 8; 6963 } 6964 // Can't use NEON instructions. 6965 if (UnitSize == 0) 6966 UnitSize = 4; 6967 } 6968 6969 // Select the correct opcode and register class for unit size load/store 6970 bool IsNeon = UnitSize >= 8; 6971 TRC = (IsThumb1 || IsThumb2) ? (const TargetRegisterClass *)&ARM::tGPRRegClass 6972 : (const TargetRegisterClass *)&ARM::GPRRegClass; 6973 if (IsNeon) 6974 VecTRC = UnitSize == 16 6975 ? (const TargetRegisterClass *)&ARM::DPairRegClass 6976 : UnitSize == 8 6977 ? (const TargetRegisterClass *)&ARM::DPRRegClass 6978 : nullptr; 6979 6980 unsigned BytesLeft = SizeVal % UnitSize; 6981 unsigned LoopSize = SizeVal - BytesLeft; 6982 6983 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { 6984 // Use LDR and STR to copy. 6985 // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) 6986 // [destOut] = STR_POST(scratch, destIn, UnitSize) 6987 unsigned srcIn = src; 6988 unsigned destIn = dest; 6989 for (unsigned i = 0; i < LoopSize; i+=UnitSize) { 6990 unsigned srcOut = MRI.createVirtualRegister(TRC); 6991 unsigned destOut = MRI.createVirtualRegister(TRC); 6992 unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC); 6993 emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut, 6994 IsThumb1, IsThumb2); 6995 emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut, 6996 IsThumb1, IsThumb2); 6997 srcIn = srcOut; 6998 destIn = destOut; 6999 } 7000 7001 // Handle the leftover bytes with LDRB and STRB. 7002 // [scratch, srcOut] = LDRB_POST(srcIn, 1) 7003 // [destOut] = STRB_POST(scratch, destIn, 1) 7004 for (unsigned i = 0; i < BytesLeft; i++) { 7005 unsigned srcOut = MRI.createVirtualRegister(TRC); 7006 unsigned destOut = MRI.createVirtualRegister(TRC); 7007 unsigned scratch = MRI.createVirtualRegister(TRC); 7008 emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut, 7009 IsThumb1, IsThumb2); 7010 emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut, 7011 IsThumb1, IsThumb2); 7012 srcIn = srcOut; 7013 destIn = destOut; 7014 } 7015 MI->eraseFromParent(); // The instruction is gone now. 7016 return BB; 7017 } 7018 7019 // Expand the pseudo op to a loop. 7020 // thisMBB: 7021 // ... 
  //   movw varEnd, # --> with thumb2
  //   movt varEnd, #
  //   ldrcp varEnd, idx --> without thumb2
  //   fallthrough --> loopMBB
  // loopMBB:
  //   PHI varPhi, varEnd, varLoop
  //   PHI srcPhi, src, srcLoop
  //   PHI destPhi, dst, destLoop
  //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  //   subs varLoop, varPhi, #UnitSize
  //   bne loopMBB
  //   fallthrough --> exitMBB
  // exitMBB:
  //   epilogue to handle left-over bytes
  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Load an immediate to varEnd.
  unsigned varEnd = MRI.createVirtualRegister(TRC);
  if (IsThumb2) {
    unsigned Vtmp = varEnd;
    if ((LoopSize & 0xFFFF0000) != 0)
      Vtmp = MRI.createVirtualRegister(TRC);
    AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVi16), Vtmp)
                   .addImm(LoopSize & 0xFFFF));

    if ((LoopSize & 0xFFFF0000) != 0)
      AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVTi16), varEnd)
                     .addReg(Vtmp).addImm(LoopSize >> 16));
  } else {
    MachineConstantPool *ConstantPool = MF->getConstantPool();
    Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
    const Constant *C = ConstantInt::get(Int32Ty, LoopSize);

    // MachineConstantPool wants an explicit alignment.
    unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
    if (Align == 0)
      Align = getDataLayout()->getTypeAllocSize(C->getType());
    unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);

    if (IsThumb1)
      AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)).addReg(
          varEnd, RegState::Define).addConstantPoolIndex(Idx));
    else
      AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp)).addReg(
          varEnd, RegState::Define).addConstantPoolIndex(Idx).addImm(0));
  }
  BB->addSuccessor(loopMBB);

  // Generate the loop body:
  //   varPhi = PHI(varLoop, varEnd)
  //   srcPhi = PHI(srcLoop, src)
  //   destPhi = PHI(destLoop, dst)
  MachineBasicBlock *entryBB = BB;
  BB = loopMBB;
  unsigned varLoop = MRI.createVirtualRegister(TRC);
  unsigned varPhi = MRI.createVirtualRegister(TRC);
  unsigned srcLoop = MRI.createVirtualRegister(TRC);
  unsigned srcPhi = MRI.createVirtualRegister(TRC);
  unsigned destLoop = MRI.createVirtualRegister(TRC);
  unsigned destPhi = MRI.createVirtualRegister(TRC);

  BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
    .addReg(varLoop).addMBB(loopMBB)
    .addReg(varEnd).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
    .addReg(srcLoop).addMBB(loopMBB)
    .addReg(src).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
    .addReg(destLoop).addMBB(loopMBB)
    .addReg(dest).addMBB(entryBB);

  // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
  emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
             IsThumb1, IsThumb2);
  emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
             IsThumb1, IsThumb2);

  // Decrement loop variable by UnitSize.
  if (IsThumb1) {
    MachineInstrBuilder MIB =
      BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop);
    MIB = AddDefaultT1CC(MIB);
    MIB.addReg(varPhi).addImm(UnitSize);
    AddDefaultPred(MIB);
  } else {
    MachineInstrBuilder MIB =
      BuildMI(*BB, BB->end(), dl,
              TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
    AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize)));
    MIB->getOperand(5).setReg(ARM::CPSR);
    MIB->getOperand(5).setIsDef(true);
  }
  BuildMI(*BB, BB->end(), dl,
          TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
    .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);

  // loopMBB can loop back to loopMBB or fall through to exitMBB.
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // Add epilogue to handle BytesLeft.
  BB = exitMBB;
  MachineInstr *StartOfExit = exitMBB->begin();

  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  unsigned srcIn = srcLoop;
  unsigned destIn = destLoop;
  for (unsigned i = 0; i < BytesLeft; i++) {
    unsigned srcOut = MRI.createVirtualRegister(TRC);
    unsigned destOut = MRI.createVirtualRegister(TRC);
    unsigned scratch = MRI.createVirtualRegister(TRC);
    emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
               IsThumb1, IsThumb2);
    emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
               IsThumb1, IsThumb2);
    srcIn = srcOut;
    destIn = destOut;
  }

  MI->eraseFromParent();   // The instruction is gone now.
  return BB;
}

MachineBasicBlock *
ARMTargetLowering::EmitLowered__chkstk(MachineInstr *MI,
                                       MachineBasicBlock *MBB) const {
  const TargetMachine &TM = getTargetMachine();
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  assert(Subtarget->isTargetWindows() &&
         "__chkstk is only supported on Windows");
  assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");

  // __chkstk takes the number of words to allocate on the stack in R4, and
  // returns the stack adjustment in number of bytes in R4. This will not
  // clobber any other registers (other than the obvious lr).
  //
  // Although, technically, IP should be considered a register which may be
  // clobbered, the call itself will not touch it. Windows on ARM is a pure
  // thumb-2 environment, so there is no interworking required. As a result, we
  // do not expect a veneer to be emitted by the linker, clobbering IP.
  //
  // Each module receives its own copy of __chkstk, so no import thunk is
  // required, again, ensuring that IP is not clobbered.
  //
  // Finally, although some linkers may theoretically provide a trampoline for
  // out of range calls (which is quite common due to a 32M range limitation of
  // branches for Thumb), we can generate the long-call version via
  // -mcmodel=large, alleviating the need for the trampoline which may clobber
  // IP.
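  //
  // A rough sketch of the sequences emitted below (register names
  // illustrative):
  //   small/medium/kernel code models:
  //     bl    __chkstk        ; words to allocate in R4, byte count out in R4
  //     sub.w sp, sp, r4
  //   large code model:
  //     movw/movt rN, __chkstk ; via the t2MOVi32imm pseudo
  //     blx   rN
  //     sub.w sp, sp, r4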
7187 7188 switch (TM.getCodeModel()) { 7189 case CodeModel::Small: 7190 case CodeModel::Medium: 7191 case CodeModel::Default: 7192 case CodeModel::Kernel: 7193 BuildMI(*MBB, MI, DL, TII.get(ARM::tBL)) 7194 .addImm((unsigned)ARMCC::AL).addReg(0) 7195 .addExternalSymbol("__chkstk") 7196 .addReg(ARM::R4, RegState::Implicit | RegState::Kill) 7197 .addReg(ARM::R4, RegState::Implicit | RegState::Define) 7198 .addReg(ARM::R12, RegState::Implicit | RegState::Define | RegState::Dead); 7199 break; 7200 case CodeModel::Large: 7201 case CodeModel::JITDefault: { 7202 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 7203 unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass); 7204 7205 BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg) 7206 .addExternalSymbol("__chkstk"); 7207 BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr)) 7208 .addImm((unsigned)ARMCC::AL).addReg(0) 7209 .addReg(Reg, RegState::Kill) 7210 .addReg(ARM::R4, RegState::Implicit | RegState::Kill) 7211 .addReg(ARM::R4, RegState::Implicit | RegState::Define) 7212 .addReg(ARM::R12, RegState::Implicit | RegState::Define | RegState::Dead); 7213 break; 7214 } 7215 } 7216 7217 AddDefaultCC(AddDefaultPred(BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), 7218 ARM::SP) 7219 .addReg(ARM::SP, RegState::Define) 7220 .addReg(ARM::R4, RegState::Kill))); 7221 7222 MI->eraseFromParent(); 7223 return MBB; 7224 } 7225 7226 MachineBasicBlock * 7227 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 7228 MachineBasicBlock *BB) const { 7229 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 7230 DebugLoc dl = MI->getDebugLoc(); 7231 bool isThumb2 = Subtarget->isThumb2(); 7232 switch (MI->getOpcode()) { 7233 default: { 7234 MI->dump(); 7235 llvm_unreachable("Unexpected instr type to insert"); 7236 } 7237 // The Thumb2 pre-indexed stores have the same MI operands, they just 7238 // define them differently in the .td files from the isel patterns, so 7239 // they need pseudos. 7240 case ARM::t2STR_preidx: 7241 MI->setDesc(TII->get(ARM::t2STR_PRE)); 7242 return BB; 7243 case ARM::t2STRB_preidx: 7244 MI->setDesc(TII->get(ARM::t2STRB_PRE)); 7245 return BB; 7246 case ARM::t2STRH_preidx: 7247 MI->setDesc(TII->get(ARM::t2STRH_PRE)); 7248 return BB; 7249 7250 case ARM::STRi_preidx: 7251 case ARM::STRBi_preidx: { 7252 unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ? 7253 ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM; 7254 // Decode the offset. 
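    // (The AM2 immediate packs the add/sub direction together with the offset
    // magnitude; getAM2Op/getAM2Offset below split it back apart so the
    // *_PRE_IMM form can take a plain signed offset.)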
7255 unsigned Offset = MI->getOperand(4).getImm(); 7256 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 7257 Offset = ARM_AM::getAM2Offset(Offset); 7258 if (isSub) 7259 Offset = -Offset; 7260 7261 MachineMemOperand *MMO = *MI->memoperands_begin(); 7262 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 7263 .addOperand(MI->getOperand(0)) // Rn_wb 7264 .addOperand(MI->getOperand(1)) // Rt 7265 .addOperand(MI->getOperand(2)) // Rn 7266 .addImm(Offset) // offset (skip GPR==zero_reg) 7267 .addOperand(MI->getOperand(5)) // pred 7268 .addOperand(MI->getOperand(6)) 7269 .addMemOperand(MMO); 7270 MI->eraseFromParent(); 7271 return BB; 7272 } 7273 case ARM::STRr_preidx: 7274 case ARM::STRBr_preidx: 7275 case ARM::STRH_preidx: { 7276 unsigned NewOpc; 7277 switch (MI->getOpcode()) { 7278 default: llvm_unreachable("unexpected opcode!"); 7279 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 7280 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 7281 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 7282 } 7283 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 7284 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 7285 MIB.addOperand(MI->getOperand(i)); 7286 MI->eraseFromParent(); 7287 return BB; 7288 } 7289 7290 case ARM::tMOVCCr_pseudo: { 7291 // To "insert" a SELECT_CC instruction, we actually have to insert the 7292 // diamond control-flow pattern. The incoming instruction knows the 7293 // destination vreg to set, the condition code register to branch on, the 7294 // true/false values to select between, and a branch opcode to use. 7295 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 7296 MachineFunction::iterator It = BB; 7297 ++It; 7298 7299 // thisMBB: 7300 // ... 7301 // TrueVal = ... 7302 // cmpTY ccX, r1, r2 7303 // bCC copy1MBB 7304 // fallthrough --> copy0MBB 7305 MachineBasicBlock *thisMBB = BB; 7306 MachineFunction *F = BB->getParent(); 7307 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 7308 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 7309 F->insert(It, copy0MBB); 7310 F->insert(It, sinkMBB); 7311 7312 // Transfer the remainder of BB and its successor edges to sinkMBB. 7313 sinkMBB->splice(sinkMBB->begin(), BB, 7314 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 7315 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 7316 7317 BB->addSuccessor(copy0MBB); 7318 BB->addSuccessor(sinkMBB); 7319 7320 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 7321 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 7322 7323 // copy0MBB: 7324 // %FalseValue = ... 7325 // # fallthrough to sinkMBB 7326 BB = copy0MBB; 7327 7328 // Update machine-CFG edges 7329 BB->addSuccessor(sinkMBB); 7330 7331 // sinkMBB: 7332 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 7333 // ... 7334 BB = sinkMBB; 7335 BuildMI(*BB, BB->begin(), dl, 7336 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 7337 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 7338 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 7339 7340 MI->eraseFromParent(); // The pseudo instruction is gone now. 7341 return BB; 7342 } 7343 7344 case ARM::BCCi64: 7345 case ARM::BCCZi64: { 7346 // If there is an unconditional branch to the other successor, remove it. 7347 BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end()); 7348 7349 // Compare both parts that make up the double comparison separately for 7350 // equality. 
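    // That is, (LHS1:LHS2) == (RHS1:RHS2) iff both 32-bit halves compare
    // equal: the second CMP is predicated on EQ, so CPSR still holds EQ only
    // when the first compare succeeded as well.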
7351 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 7352 7353 unsigned LHS1 = MI->getOperand(1).getReg(); 7354 unsigned LHS2 = MI->getOperand(2).getReg(); 7355 if (RHSisZero) { 7356 AddDefaultPred(BuildMI(BB, dl, 7357 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 7358 .addReg(LHS1).addImm(0)); 7359 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 7360 .addReg(LHS2).addImm(0) 7361 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 7362 } else { 7363 unsigned RHS1 = MI->getOperand(3).getReg(); 7364 unsigned RHS2 = MI->getOperand(4).getReg(); 7365 AddDefaultPred(BuildMI(BB, dl, 7366 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 7367 .addReg(LHS1).addReg(RHS1)); 7368 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 7369 .addReg(LHS2).addReg(RHS2) 7370 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 7371 } 7372 7373 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 7374 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 7375 if (MI->getOperand(0).getImm() == ARMCC::NE) 7376 std::swap(destMBB, exitMBB); 7377 7378 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 7379 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 7380 if (isThumb2) 7381 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB)); 7382 else 7383 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 7384 7385 MI->eraseFromParent(); // The pseudo instruction is gone now. 7386 return BB; 7387 } 7388 7389 case ARM::Int_eh_sjlj_setjmp: 7390 case ARM::Int_eh_sjlj_setjmp_nofp: 7391 case ARM::tInt_eh_sjlj_setjmp: 7392 case ARM::t2Int_eh_sjlj_setjmp: 7393 case ARM::t2Int_eh_sjlj_setjmp_nofp: 7394 EmitSjLjDispatchBlock(MI, BB); 7395 return BB; 7396 7397 case ARM::ABS: 7398 case ARM::t2ABS: { 7399 // To insert an ABS instruction, we have to insert the 7400 // diamond control-flow pattern. The incoming instruction knows the 7401 // source vreg to test against 0, the destination vreg to set, 7402 // the condition code register to branch on, the 7403 // true/false values to select between, and a branch opcode to use. 7404 // It transforms 7405 // V1 = ABS V0 7406 // into 7407 // V2 = MOVS V0 7408 // BCC (branch to SinkBB if V0 >= 0) 7409 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) 7410 // SinkBB: V1 = PHI(V2, V3) 7411 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 7412 MachineFunction::iterator BBI = BB; 7413 ++BBI; 7414 MachineFunction *Fn = BB->getParent(); 7415 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); 7416 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); 7417 Fn->insert(BBI, RSBBB); 7418 Fn->insert(BBI, SinkBB); 7419 7420 unsigned int ABSSrcReg = MI->getOperand(1).getReg(); 7421 unsigned int ABSDstReg = MI->getOperand(0).getReg(); 7422 bool isThumb2 = Subtarget->isThumb2(); 7423 MachineRegisterInfo &MRI = Fn->getRegInfo(); 7424 // In Thumb mode S must not be specified if source register is the SP or 7425 // PC and if destination register is the SP, so restrict register class 7426 unsigned NewRsbDstReg = MRI.createVirtualRegister(isThumb2 ? 7427 (const TargetRegisterClass*)&ARM::rGPRRegClass : 7428 (const TargetRegisterClass*)&ARM::GPRRegClass); 7429 7430 // Transfer the remainder of BB and its successor edges to sinkMBB. 
    // Transfer the remainder of BB and its successor edges to SinkBB.
    SinkBB->splice(SinkBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
    SinkBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(RSBBB);
    BB->addSuccessor(SinkBB);

    // RSBBB falls through to SinkBB.
    RSBBB->addSuccessor(SinkBB);

    // Insert a CMP at the end of BB.
    AddDefaultPred(BuildMI(BB, dl,
                           TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
                   .addReg(ABSSrcReg).addImm(0));

    // Insert a Bcc with the opposite condition to ARMCC::MI at the end of BB.
    BuildMI(BB, dl,
            TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
      .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);

    // Insert an RSBri in RSBBB.
    // Note: the Bcc and RSBri will be converted into a predicated rsbmi by
    // the if-conversion pass.
    BuildMI(*RSBBB, RSBBB->begin(), dl,
            TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
      .addReg(ABSSrcReg, RegState::Kill)
      .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);

    // Insert the PHI in SinkBB, reusing ABSDstReg so that the uses of the
    // ABS instruction do not change.
    BuildMI(*SinkBB, SinkBB->begin(), dl,
            TII->get(ARM::PHI), ABSDstReg)
      .addReg(NewRsbDstReg).addMBB(RSBBB)
      .addReg(ABSSrcReg).addMBB(BB);

    // Remove the ABS instruction.
    MI->eraseFromParent();

    // Return the last added BB.
    return SinkBB;
  }
  case ARM::COPY_STRUCT_BYVAL_I32:
    ++NumLoopByVals;
    return EmitStructByval(MI, BB);
  case ARM::WIN__CHKSTK:
    return EmitLowered__chkstk(MI, BB);
  }
}

void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
                                                      SDNode *Node) const {
  if (!MI->hasPostISelHook()) {
    assert(!convertAddSubFlagsOpcode(MI->getOpcode()) &&
           "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'");
    return;
  }

  const MCInstrDesc *MCID = &MI->getDesc();
  // Adjust potentially 's'-setting instructions after isel, i.e. ADC, SBC,
  // RSB, RSC. Coming out of isel, they have an implicit CPSR def, but the
  // optional operand is still set to noreg. If needed, set the optional
  // operand's register to CPSR, and remove the redundant implicit def.
  //
  // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>).

  // Rename pseudo opcodes.
  unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode());
  if (NewOpc) {
    const ARMBaseInstrInfo *TII =
      static_cast<const ARMBaseInstrInfo*>(getTargetMachine().getInstrInfo());
    MCID = &TII->get(NewOpc);

    assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 &&
           "converted opcode should be the same except for cc_out");

    MI->setDesc(*MCID);

    // Add the optional cc_out operand.
    MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
  }
  unsigned ccOutIdx = MCID->getNumOperands() - 1;

  // Any ARM instruction that sets the 's' bit should specify an optional
  // "cc_out" operand in the last operand position.
  if (!MI->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  // Look for an implicit def of CPSR added by the MachineInstr ctor. Remove
  // it, since we already have an optional CPSR def.
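  // As a sketch (the operand layout here is assumed, not verbatim), an ADC
  // whose flags are live looks roughly like
  //   %vreg2 = ADC %vreg0, %vreg1, pred:14, pred:%noreg, opt:%noreg,
  //            %CPSR<imp-def>
  // at this point; the loop below strips the trailing implicit def, and the
  // optional cc_out operand is activated at the end instead.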
  bool definesCPSR = false;
  bool deadCPSR = false;
  for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
      definesCPSR = true;
      if (MO.isDead())
        deadCPSR = true;
      MI->RemoveOperand(i);
      break;
    }
  }
  if (!definesCPSR) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
  if (deadCPSR) {
    assert(!MI->getOperand(ccOutIdx).getReg() &&
           "expect uninitialized optional cc_out operand");
    return;
  }

  // If this instruction was defined with an optional CPSR def and its dag node
  // had a live implicit CPSR def, then activate the optional CPSR def.
  MachineOperand &MO = MI->getOperand(ccOutIdx);
  MO.setReg(ARM::CPSR);
  MO.setIsDef(true);
}

//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//

// Helper function that checks if N is a null or all ones constant.
static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
  if (!C)
    return false;
  return AllOnes ? C->isAllOnesValue() : C->isNullValue();
}

// Return true if N is conditionally 0 or all ones.
// Detects these expressions where cc is an i1 value:
//
//   (select cc 0, y)   [AllOnes=0]
//   (select cc y, 0)   [AllOnes=0]
//   (zext cc)          [AllOnes=0]
//   (sext cc)          [AllOnes=0/1]
//   (select cc -1, y)  [AllOnes=1]
//   (select cc y, -1)  [AllOnes=1]
//
// Invert is set when N is the null/all ones constant when CC is false.
// OtherOp is set to the alternative value of N.
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
                                       SDValue &CC, bool &Invert,
                                       SDValue &OtherOp,
                                       SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: return false;
  case ISD::SELECT: {
    CC = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    if (isZeroOrAllOnes(N1, AllOnes)) {
      Invert = false;
      OtherOp = N2;
      return true;
    }
    if (isZeroOrAllOnes(N2, AllOnes)) {
      Invert = true;
      OtherOp = N1;
      return true;
    }
    return false;
  }
  case ISD::ZERO_EXTEND:
    // (zext cc) can never be the all ones value.
    if (AllOnes)
      return false;
    // Fall through.
  case ISD::SIGN_EXTEND: {
    EVT VT = N->getValueType(0);
    CC = N->getOperand(0);
    if (CC.getValueType() != MVT::i1)
      return false;
    Invert = !AllOnes;
    if (AllOnes)
      // When looking for an AllOnes constant, N is an sext, and the 'other'
      // value is 0.
      OtherOp = DAG.getConstant(0, VT);
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      // When looking for a 0 constant, N can be zext or sext.
      OtherOp = DAG.getConstant(1, VT);
    else
      OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()),
                                VT);
    return true;
  }
  }
}
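
// For illustration (a sketch): for N = (select cc, 0, %t) with AllOnes=0 the
// helper above returns true with CC = cc, Invert = false, OtherOp = %t; for
// N = (zext cc) it returns true with Invert = true and OtherOp = 1.
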
// Combine a constant select operand into its use:
//
//   (add (select cc, 0, c), x)  -> (select cc, x, (add, x, c))
//   (sub x, (select cc, 0, c))  -> (select cc, x, (sub, x, c))
//   (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))  [AllOnes=1]
//   (or  (select cc, 0, c), x)  -> (select cc, x, (or, x, c))
//   (xor (select cc, 0, c), x)  -> (select cc, x, (xor, x, c))
//
// The transform is rejected if the select doesn't have a constant operand that
// is null, or all ones when AllOnes is set.
//
// Also recognize sext/zext from i1:
//
//   (add (zext cc), x) -> (select cc (add x, 1), x)
//   (add (sext cc), x) -> (select cc (add x, -1), x)
//
// These transformations eventually create predicated instructions.
//
// @param N       The node to transform.
// @param Slct    The N operand that is a select.
// @param OtherOp The other N operand (x above).
// @param DCI     Context.
// @param AllOnes Require the select constant to be all ones instead of null.
// @returns The new node, or SDValue() on failure.
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                            TargetLowering::DAGCombinerInfo &DCI,
                            bool AllOnes = false) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue NonConstantVal;
  SDValue CCOp;
  bool SwapSelectOps;
  if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
                                  NonConstantVal, DAG))
    return SDValue();

  // Slct is now known to be the desired identity constant when CC is true.
  SDValue TrueVal = OtherOp;
  SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
                                 OtherOp, NonConstantVal);
  // Unless SwapSelectOps says CC should be false.
  if (SwapSelectOps)
    std::swap(TrueVal, FalseVal);

  return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
                     CCOp, TrueVal, FalseVal);
}

// Attempt combineSelectAndUse on each operand of a commutative operator N.
static
SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  if (N0.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes);
    if (Result.getNode())
      return Result;
  }
  if (N1.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes);
    if (Result.getNode())
      return Result;
  }
  return SDValue();
}

// AddCombineToVPADDL - For a pair-wise add on NEON, use the vpaddl
// instruction (only after legalization).
static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {

  // Only perform the optimization after legalization and if NEON is
  // available. We also expect both operands to be BUILD_VECTORs.
  if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
      || N0.getOpcode() != ISD::BUILD_VECTOR
      || N1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();
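  // The shape being matched is, roughly:
  //   N0 = BUILD_VECTOR (extractelt %v, 0), (extractelt %v, 2), ...
  //   N1 = BUILD_VECTOR (extractelt %v, 1), (extractelt %v, 3), ...
  // so N0 + N1 is a pair-wise add of neighbouring lanes of %v, which is what
  // vpaddl computes (with each result element widened).
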
  // Check the output type, since VPADDL operand elements can only be 8, 16,
  // or 32 bits wide.
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Check that the vector operands are of the right form.
  // N0 and N1 are BUILD_VECTOR nodes with N EXTRACT_VECTOR_ELT operands,
  // where N is the size of the formed vector.
  // Each EXTRACT_VECTOR_ELT should have the same input vector and an even or
  // odd index such that we have a pair-wise add pattern.

  // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
  if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();
  SDValue Vec = N0->getOperand(0)->getOperand(0);
  SDNode *V = Vec.getNode();
  unsigned nextIndex = 0;

  // For each pair of BUILD_VECTOR operands, check that every element is an
  // EXTRACT_VECTOR_ELT of the same vector with the appropriate index.
  for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
    if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
        && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

      SDValue ExtVec0 = N0->getOperand(i);
      SDValue ExtVec1 = N1->getOperand(i);

      // The first operand is the vector; verify that it is the same.
      if (V != ExtVec0->getOperand(0).getNode() ||
          V != ExtVec1->getOperand(0).getNode())
        return SDValue();

      // The second operand is the index constant; verify that it is correct.
      ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
      ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));

      // For the indices, we expect all the even ones in N0 and all the odd
      // ones in N1.
      if (!C0 || !C1 || C0->getZExtValue() != nextIndex
          || C1->getZExtValue() != nextIndex+1)
        return SDValue();

      // Increment the index.
      nextIndex += 2;
    } else
      return SDValue();
  }

  // Create the VPADDL node.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Build the operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls,
                                TLI.getPointerTy()));

  // The input is the vector.
  Ops.push_back(Vec);

  // Get the widened type.
  MVT widenType;
  unsigned numElem = VT.getVectorNumElements();

  EVT inputLaneType = Vec.getValueType().getVectorElementType();
  switch (inputLaneType.getSimpleVT().SimpleTy) {
  case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
  case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
  case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
  default:
    llvm_unreachable("Invalid vector element type for padd optimization.");
  }

  SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), widenType, Ops);
  unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND
                                                 : ISD::TRUNCATE;
  return DAG.getNode(ExtOp, SDLoc(N), VT, tmp);
}

static SDValue findMUL_LOHI(SDValue V) {
  if (V->getOpcode() == ISD::UMUL_LOHI ||
      V->getOpcode() == ISD::SMUL_LOHI)
    return V;
  return SDValue();
}
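
// A sketch of the source-level pattern the combine below targets on 32-bit
// ARM:
//
//   uint64_t acc; uint32_t a, b;
//   acc += (uint64_t)a * (uint64_t)b;
//
// The i64 multiply legalizes to UMUL_LOHI (SMUL_LOHI when signed) and the
// i64 add to an ADDC/ADDE pair, which are fused into UMLAL (resp. SMLAL).
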
static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {

  if (Subtarget->isThumb1Only()) return SDValue();

  // Only perform the checks after legalize when the pattern is available.
  if (DCI.isBeforeLegalize()) return SDValue();

  // Look for multiply-add opportunities.
  // The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
  // each add node consumes a value from ISD::UMUL_LOHI and there is
  // a glue link from the first add to the second add.
  // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
  // a S/UMLAL instruction.
  //
  //            loAdd   UMUL_LOHI
  //              \    / :lo    \ :hi
  //               \  /          \          [no multiline comment]
  //                ADDC         |  hiAdd
  //                  \ :glue   /  /
  //                   \       /  /
  //                     ADDE
  //
  assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC");
  SDValue AddcOp0 = AddcNode->getOperand(0);
  SDValue AddcOp1 = AddcNode->getOperand(1);

  // Check that the two operands do not come from the same mul_lohi node.
  if (AddcOp0.getNode() == AddcOp1.getNode())
    return SDValue();

  assert(AddcNode->getNumValues() == 2 &&
         AddcNode->getValueType(0) == MVT::i32 &&
         "Expect ADDC with two result values. First: i32");

  // Check that we have a glued ADDC node.
  if (AddcNode->getValueType(1) != MVT::Glue)
    return SDValue();

  // Check that the ADDC adds the low result of the S/UMUL_LOHI.
  if (AddcOp0->getOpcode() != ISD::UMUL_LOHI &&
      AddcOp0->getOpcode() != ISD::SMUL_LOHI &&
      AddcOp1->getOpcode() != ISD::UMUL_LOHI &&
      AddcOp1->getOpcode() != ISD::SMUL_LOHI)
    return SDValue();

  // Look for the glued ADDE.
  SDNode *AddeNode = AddcNode->getGluedUser();
  if (!AddeNode)
    return SDValue();

  // Make sure it is really an ADDE.
  if (AddeNode->getOpcode() != ISD::ADDE)
    return SDValue();

  assert(AddeNode->getNumOperands() == 3 &&
         AddeNode->getOperand(2).getValueType() == MVT::Glue &&
         "ADDE node has the wrong inputs");

  // Check for the triangle shape.
  SDValue AddeOp0 = AddeNode->getOperand(0);
  SDValue AddeOp1 = AddeNode->getOperand(1);

  // Make sure that the ADDE operands are not coming from the same node.
  if (AddeOp0.getNode() == AddeOp1.getNode())
    return SDValue();

  // Find the MUL_LOHI node walking up ADDE's operands.
  bool IsLeftOperandMUL = false;
  SDValue MULOp = findMUL_LOHI(AddeOp0);
  if (MULOp == SDValue())
    MULOp = findMUL_LOHI(AddeOp1);
  else
    IsLeftOperandMUL = true;
  if (MULOp == SDValue())
    return SDValue();

  // Figure out the right opcode.
  unsigned Opc = MULOp->getOpcode();
  unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;

  // Figure out the high and low input values to the MLAL node.
  SDValue *HiMul = &MULOp;
  SDValue *HiAdd = nullptr;
  SDValue *LoMul = nullptr;
  SDValue *LowAdd = nullptr;

  if (IsLeftOperandMUL)
    HiAdd = &AddeOp1;
  else
    HiAdd = &AddeOp0;

  if (AddcOp0->getOpcode() == Opc) {
    LoMul = &AddcOp0;
    LowAdd = &AddcOp1;
  }
  if (AddcOp1->getOpcode() == Opc) {
    LoMul = &AddcOp1;
    LowAdd = &AddcOp0;
  }

  if (!LoMul)
    return SDValue();

  if (LoMul->getNode() != HiMul->getNode())
    return SDValue();
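  // All checks have passed; the [US]MLAL node built below takes the operands
  // (MulOp0, MulOp1, LoAdd, HiAdd) and produces two i32 results, the low and
  // high halves of the 64-bit accumulation, which replace the ADDC and ADDE
  // results respectively.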
  // Create the merged node.
  SelectionDAG &DAG = DCI.DAG;

  // Build the operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(LoMul->getOperand(0));
  Ops.push_back(LoMul->getOperand(1));
  Ops.push_back(*LowAdd);
  Ops.push_back(*HiAdd);

  SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcNode),
                                 DAG.getVTList(MVT::i32, MVT::i32), Ops);

  // Replace the ADDC and ADDE nodes' uses with the MLAL node's values.
  SDValue HiMLALResult(MLALNode.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);

  SDValue LoMLALResult(MLALNode.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);

  // Return the original node to notify the driver to stop replacing.
  SDValue resNode(AddcNode, 0);
  return resNode;
}

/// PerformADDCCombine - Target-specific DAG combine transform from
/// ISD::ADDC, ISD::ADDE, and ISD::UMUL_LOHI/ISD::SMUL_LOHI to MLAL.
static SDValue PerformADDCCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  return AddCombineTo64bitMLAL(N, DCI, Subtarget);
}

/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
/// operands N0 and N1. This is a helper for PerformADDCombine that is
/// called with the default operands, and if that fails, with commuted
/// operands.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                             TargetLowering::DAGCombinerInfo &DCI,
                                             const ARMSubtarget *Subtarget) {

  // Attempt to create a vpaddl for this add.
  SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget);
  if (Result.getNode())
    return Result;

  // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
  if (N0.getNode()->hasOneUse()) {
    SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
    if (Result.getNode()) return Result;
  }
  return SDValue();
}

/// PerformADDCombine - Target-specific DAG combine xforms for ISD::ADD.
///
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 con