1 //===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements the PPCISelLowering class. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "PPCISelLowering.h" 15 #include "MCTargetDesc/PPCPredicates.h" 16 #include "PPCCallingConv.h" 17 #include "PPCMachineFunctionInfo.h" 18 #include "PPCPerfectShuffle.h" 19 #include "PPCTargetMachine.h" 20 #include "PPCTargetObjectFile.h" 21 #include "llvm/ADT/STLExtras.h" 22 #include "llvm/ADT/StringSwitch.h" 23 #include "llvm/ADT/Triple.h" 24 #include "llvm/CodeGen/CallingConvLower.h" 25 #include "llvm/CodeGen/MachineFrameInfo.h" 26 #include "llvm/CodeGen/MachineFunction.h" 27 #include "llvm/CodeGen/MachineInstrBuilder.h" 28 #include "llvm/CodeGen/MachineLoopInfo.h" 29 #include "llvm/CodeGen/MachineRegisterInfo.h" 30 #include "llvm/CodeGen/SelectionDAG.h" 31 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" 32 #include "llvm/IR/CallingConv.h" 33 #include "llvm/IR/Constants.h" 34 #include "llvm/IR/DerivedTypes.h" 35 #include "llvm/IR/Function.h" 36 #include "llvm/IR/Intrinsics.h" 37 #include "llvm/Support/CommandLine.h" 38 #include "llvm/Support/ErrorHandling.h" 39 #include "llvm/Support/MathExtras.h" 40 #include "llvm/Support/raw_ostream.h" 41 #include "llvm/Target/TargetOptions.h" 42 43 using namespace llvm; 44 45 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc", 46 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden); 47 48 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref", 49 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden); 50 51 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned", 52 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden); 53 54 // FIXME: Remove this once the bug has been fixed! 55 extern cl::opt<bool> ANDIGlueBug; 56 57 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, 58 const PPCSubtarget &STI) 59 : TargetLowering(TM), Subtarget(STI) { 60 // Use _setjmp/_longjmp instead of setjmp/longjmp. 61 setUseUnderscoreSetJmp(true); 62 setUseUnderscoreLongJmp(true); 63 64 // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all 65 // arguments are at least 4/8 bytes aligned. 66 bool isPPC64 = Subtarget.isPPC64(); 67 setMinStackArgumentAlignment(isPPC64 ? 8:4); 68 69 // Set up the register classes. 70 addRegisterClass(MVT::i32, &PPC::GPRCRegClass); 71 if (!Subtarget.useSoftFloat()) { 72 addRegisterClass(MVT::f32, &PPC::F4RCRegClass); 73 addRegisterClass(MVT::f64, &PPC::F8RCRegClass); 74 } 75 76 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD 77 for (MVT VT : MVT::integer_valuetypes()) { 78 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); 79 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand); 80 } 81 82 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 83 84 // PowerPC has pre-inc load and store's. 85 setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal); 86 setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal); 87 setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal); 88 setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal); 89 setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal); 90 setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal); 91 setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal); 92 setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal); 93 setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal); 94 setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal); 95 setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal); 96 setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal); 97 setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal); 98 setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal); 99 100 if (Subtarget.useCRBits()) { 101 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 102 103 if (isPPC64 || Subtarget.hasFPCVT()) { 104 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote); 105 AddPromotedToType (ISD::SINT_TO_FP, MVT::i1, 106 isPPC64 ? MVT::i64 : MVT::i32); 107 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote); 108 AddPromotedToType(ISD::UINT_TO_FP, MVT::i1, 109 isPPC64 ? MVT::i64 : MVT::i32); 110 } else { 111 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom); 112 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom); 113 } 114 115 // PowerPC does not support direct load / store of condition registers 116 setOperationAction(ISD::LOAD, MVT::i1, Custom); 117 setOperationAction(ISD::STORE, MVT::i1, Custom); 118 119 // FIXME: Remove this once the ANDI glue bug is fixed: 120 if (ANDIGlueBug) 121 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom); 122 123 for (MVT VT : MVT::integer_valuetypes()) { 124 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); 125 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote); 126 setTruncStoreAction(VT, MVT::i1, Expand); 127 } 128 129 addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass); 130 } 131 132 // This is used in the ppcf128->int sequence. Note it has different semantics 133 // from FP_ROUND: that rounds to nearest, this rounds to zero. 134 setOperationAction(ISD::FP_ROUND_INREG, MVT::ppcf128, Custom); 135 136 // We do not currently implement these libm ops for PowerPC. 137 setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand); 138 setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand); 139 setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand); 140 setOperationAction(ISD::FRINT, MVT::ppcf128, Expand); 141 setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand); 142 setOperationAction(ISD::FREM, MVT::ppcf128, Expand); 143 144 // PowerPC has no SREM/UREM instructions 145 setOperationAction(ISD::SREM, MVT::i32, Expand); 146 setOperationAction(ISD::UREM, MVT::i32, Expand); 147 setOperationAction(ISD::SREM, MVT::i64, Expand); 148 setOperationAction(ISD::UREM, MVT::i64, Expand); 149 150 // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM. 151 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand); 152 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand); 153 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand); 154 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand); 155 setOperationAction(ISD::UDIVREM, MVT::i32, Expand); 156 setOperationAction(ISD::SDIVREM, MVT::i32, Expand); 157 setOperationAction(ISD::UDIVREM, MVT::i64, Expand); 158 setOperationAction(ISD::SDIVREM, MVT::i64, Expand); 159 160 // We don't support sin/cos/sqrt/fmod/pow 161 setOperationAction(ISD::FSIN , MVT::f64, Expand); 162 setOperationAction(ISD::FCOS , MVT::f64, Expand); 163 setOperationAction(ISD::FSINCOS, MVT::f64, Expand); 164 setOperationAction(ISD::FREM , MVT::f64, Expand); 165 setOperationAction(ISD::FPOW , MVT::f64, Expand); 166 setOperationAction(ISD::FMA , MVT::f64, Legal); 167 setOperationAction(ISD::FSIN , MVT::f32, Expand); 168 setOperationAction(ISD::FCOS , MVT::f32, Expand); 169 setOperationAction(ISD::FSINCOS, MVT::f32, Expand); 170 setOperationAction(ISD::FREM , MVT::f32, Expand); 171 setOperationAction(ISD::FPOW , MVT::f32, Expand); 172 setOperationAction(ISD::FMA , MVT::f32, Legal); 173 174 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); 175 176 // If we're enabling GP optimizations, use hardware square root 177 if (!Subtarget.hasFSQRT() && 178 !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() && 179 Subtarget.hasFRE())) 180 setOperationAction(ISD::FSQRT, MVT::f64, Expand); 181 182 if (!Subtarget.hasFSQRT() && 183 !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() && 184 Subtarget.hasFRES())) 185 setOperationAction(ISD::FSQRT, MVT::f32, Expand); 186 187 if (Subtarget.hasFCPSGN()) { 188 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal); 189 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal); 190 } else { 191 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 192 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 193 } 194 195 if (Subtarget.hasFPRND()) { 196 setOperationAction(ISD::FFLOOR, MVT::f64, Legal); 197 setOperationAction(ISD::FCEIL, MVT::f64, Legal); 198 setOperationAction(ISD::FTRUNC, MVT::f64, Legal); 199 setOperationAction(ISD::FROUND, MVT::f64, Legal); 200 201 setOperationAction(ISD::FFLOOR, MVT::f32, Legal); 202 setOperationAction(ISD::FCEIL, MVT::f32, Legal); 203 setOperationAction(ISD::FTRUNC, MVT::f32, Legal); 204 setOperationAction(ISD::FROUND, MVT::f32, Legal); 205 } 206 207 // PowerPC does not have BSWAP, CTPOP or CTTZ 208 setOperationAction(ISD::BSWAP, MVT::i32 , Expand); 209 setOperationAction(ISD::CTTZ , MVT::i32 , Expand); 210 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); 211 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); 212 setOperationAction(ISD::BSWAP, MVT::i64 , Expand); 213 setOperationAction(ISD::CTTZ , MVT::i64 , Expand); 214 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 215 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); 216 217 if (Subtarget.hasPOPCNTD()) { 218 setOperationAction(ISD::CTPOP, MVT::i32 , Legal); 219 setOperationAction(ISD::CTPOP, MVT::i64 , Legal); 220 } else { 221 setOperationAction(ISD::CTPOP, MVT::i32 , Expand); 222 setOperationAction(ISD::CTPOP, MVT::i64 , Expand); 223 } 224 225 // PowerPC does not have ROTR 226 setOperationAction(ISD::ROTR, MVT::i32 , Expand); 227 setOperationAction(ISD::ROTR, MVT::i64 , Expand); 228 229 if (!Subtarget.useCRBits()) { 230 // PowerPC does not have Select 231 setOperationAction(ISD::SELECT, MVT::i32, Expand); 232 setOperationAction(ISD::SELECT, MVT::i64, Expand); 233 setOperationAction(ISD::SELECT, MVT::f32, Expand); 234 setOperationAction(ISD::SELECT, MVT::f64, Expand); 235 } 236 237 // PowerPC wants to turn select_cc of FP into fsel when possible. 238 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom); 239 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom); 240 241 // PowerPC wants to optimize integer setcc a bit 242 if (!Subtarget.useCRBits()) 243 setOperationAction(ISD::SETCC, MVT::i32, Custom); 244 245 // PowerPC does not have BRCOND which requires SetCC 246 if (!Subtarget.useCRBits()) 247 setOperationAction(ISD::BRCOND, MVT::Other, Expand); 248 249 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 250 251 // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores. 252 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 253 254 // PowerPC does not have [U|S]INT_TO_FP 255 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); 256 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); 257 258 if (Subtarget.hasDirectMove()) { 259 setOperationAction(ISD::BITCAST, MVT::f32, Legal); 260 setOperationAction(ISD::BITCAST, MVT::i32, Legal); 261 setOperationAction(ISD::BITCAST, MVT::i64, Legal); 262 setOperationAction(ISD::BITCAST, MVT::f64, Legal); 263 } else { 264 setOperationAction(ISD::BITCAST, MVT::f32, Expand); 265 setOperationAction(ISD::BITCAST, MVT::i32, Expand); 266 setOperationAction(ISD::BITCAST, MVT::i64, Expand); 267 setOperationAction(ISD::BITCAST, MVT::f64, Expand); 268 } 269 270 // We cannot sextinreg(i1). Expand to shifts. 271 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); 272 273 // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support 274 // SjLj exception handling but a light-weight setjmp/longjmp replacement to 275 // support continuation, user-level threading, and etc.. As a result, no 276 // other SjLj exception interfaces are implemented and please don't build 277 // your own exception handling based on them. 278 // LLVM/Clang supports zero-cost DWARF exception handling. 279 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); 280 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); 281 282 // We want to legalize GlobalAddress and ConstantPool nodes into the 283 // appropriate instructions to materialize the address. 284 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); 285 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); 286 setOperationAction(ISD::BlockAddress, MVT::i32, Custom); 287 setOperationAction(ISD::ConstantPool, MVT::i32, Custom); 288 setOperationAction(ISD::JumpTable, MVT::i32, Custom); 289 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); 290 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); 291 setOperationAction(ISD::BlockAddress, MVT::i64, Custom); 292 setOperationAction(ISD::ConstantPool, MVT::i64, Custom); 293 setOperationAction(ISD::JumpTable, MVT::i64, Custom); 294 295 // TRAP is legal. 296 setOperationAction(ISD::TRAP, MVT::Other, Legal); 297 298 // TRAMPOLINE is custom lowered. 299 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom); 300 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); 301 302 // VASTART needs to be custom lowered to use the VarArgsFrameIndex 303 setOperationAction(ISD::VASTART , MVT::Other, Custom); 304 305 if (Subtarget.isSVR4ABI()) { 306 if (isPPC64) { 307 // VAARG always uses double-word chunks, so promote anything smaller. 308 setOperationAction(ISD::VAARG, MVT::i1, Promote); 309 AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64); 310 setOperationAction(ISD::VAARG, MVT::i8, Promote); 311 AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64); 312 setOperationAction(ISD::VAARG, MVT::i16, Promote); 313 AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64); 314 setOperationAction(ISD::VAARG, MVT::i32, Promote); 315 AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64); 316 setOperationAction(ISD::VAARG, MVT::Other, Expand); 317 } else { 318 // VAARG is custom lowered with the 32-bit SVR4 ABI. 319 setOperationAction(ISD::VAARG, MVT::Other, Custom); 320 setOperationAction(ISD::VAARG, MVT::i64, Custom); 321 } 322 } else 323 setOperationAction(ISD::VAARG, MVT::Other, Expand); 324 325 if (Subtarget.isSVR4ABI() && !isPPC64) 326 // VACOPY is custom lowered with the 32-bit SVR4 ABI. 327 setOperationAction(ISD::VACOPY , MVT::Other, Custom); 328 else 329 setOperationAction(ISD::VACOPY , MVT::Other, Expand); 330 331 // Use the default implementation. 332 setOperationAction(ISD::VAEND , MVT::Other, Expand); 333 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand); 334 setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom); 335 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom); 336 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom); 337 setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom); 338 setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom); 339 340 // We want to custom lower some of our intrinsics. 341 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 342 343 // To handle counter-based loop conditions. 344 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom); 345 346 // Comparisons that require checking two conditions. 347 setCondCodeAction(ISD::SETULT, MVT::f32, Expand); 348 setCondCodeAction(ISD::SETULT, MVT::f64, Expand); 349 setCondCodeAction(ISD::SETUGT, MVT::f32, Expand); 350 setCondCodeAction(ISD::SETUGT, MVT::f64, Expand); 351 setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand); 352 setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand); 353 setCondCodeAction(ISD::SETOGE, MVT::f32, Expand); 354 setCondCodeAction(ISD::SETOGE, MVT::f64, Expand); 355 setCondCodeAction(ISD::SETOLE, MVT::f32, Expand); 356 setCondCodeAction(ISD::SETOLE, MVT::f64, Expand); 357 setCondCodeAction(ISD::SETONE, MVT::f32, Expand); 358 setCondCodeAction(ISD::SETONE, MVT::f64, Expand); 359 360 if (Subtarget.has64BitSupport()) { 361 // They also have instructions for converting between i64 and fp. 362 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 363 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); 364 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 365 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); 366 // This is just the low 32 bits of a (signed) fp->i64 conversion. 367 // We cannot do this with Promote because i64 is not a legal type. 368 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 369 370 if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) 371 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 372 } else { 373 // PowerPC does not have FP_TO_UINT on 32-bit implementations. 374 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); 375 } 376 377 // With the instructions enabled under FPCVT, we can do everything. 378 if (Subtarget.hasFPCVT()) { 379 if (Subtarget.has64BitSupport()) { 380 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 381 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); 382 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 383 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); 384 } 385 386 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); 387 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom); 388 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom); 389 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom); 390 } 391 392 if (Subtarget.use64BitRegs()) { 393 // 64-bit PowerPC implementations can support i64 types directly 394 addRegisterClass(MVT::i64, &PPC::G8RCRegClass); 395 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or 396 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand); 397 // 64-bit PowerPC wants to expand i128 shifts itself. 398 setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom); 399 setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom); 400 setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom); 401 } else { 402 // 32-bit PowerPC wants to expand i64 shifts itself. 403 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom); 404 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom); 405 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); 406 } 407 408 if (Subtarget.hasAltivec()) { 409 // First set operation action for all vector types to expand. Then we 410 // will selectively turn on ones that can be effectively codegen'd. 411 for (MVT VT : MVT::vector_valuetypes()) { 412 // add/sub are legal for all supported vector VT's. 413 setOperationAction(ISD::ADD, VT, Legal); 414 setOperationAction(ISD::SUB, VT, Legal); 415 416 // Vector instructions introduced in P8 417 if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) { 418 setOperationAction(ISD::CTPOP, VT, Legal); 419 setOperationAction(ISD::CTLZ, VT, Legal); 420 } 421 else { 422 setOperationAction(ISD::CTPOP, VT, Expand); 423 setOperationAction(ISD::CTLZ, VT, Expand); 424 } 425 426 // We promote all shuffles to v16i8. 427 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote); 428 AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8); 429 430 // We promote all non-typed operations to v4i32. 431 setOperationAction(ISD::AND , VT, Promote); 432 AddPromotedToType (ISD::AND , VT, MVT::v4i32); 433 setOperationAction(ISD::OR , VT, Promote); 434 AddPromotedToType (ISD::OR , VT, MVT::v4i32); 435 setOperationAction(ISD::XOR , VT, Promote); 436 AddPromotedToType (ISD::XOR , VT, MVT::v4i32); 437 setOperationAction(ISD::LOAD , VT, Promote); 438 AddPromotedToType (ISD::LOAD , VT, MVT::v4i32); 439 setOperationAction(ISD::SELECT, VT, Promote); 440 AddPromotedToType (ISD::SELECT, VT, MVT::v4i32); 441 setOperationAction(ISD::SELECT_CC, VT, Promote); 442 AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32); 443 setOperationAction(ISD::STORE, VT, Promote); 444 AddPromotedToType (ISD::STORE, VT, MVT::v4i32); 445 446 // No other operations are legal. 447 setOperationAction(ISD::MUL , VT, Expand); 448 setOperationAction(ISD::SDIV, VT, Expand); 449 setOperationAction(ISD::SREM, VT, Expand); 450 setOperationAction(ISD::UDIV, VT, Expand); 451 setOperationAction(ISD::UREM, VT, Expand); 452 setOperationAction(ISD::FDIV, VT, Expand); 453 setOperationAction(ISD::FREM, VT, Expand); 454 setOperationAction(ISD::FNEG, VT, Expand); 455 setOperationAction(ISD::FSQRT, VT, Expand); 456 setOperationAction(ISD::FLOG, VT, Expand); 457 setOperationAction(ISD::FLOG10, VT, Expand); 458 setOperationAction(ISD::FLOG2, VT, Expand); 459 setOperationAction(ISD::FEXP, VT, Expand); 460 setOperationAction(ISD::FEXP2, VT, Expand); 461 setOperationAction(ISD::FSIN, VT, Expand); 462 setOperationAction(ISD::FCOS, VT, Expand); 463 setOperationAction(ISD::FABS, VT, Expand); 464 setOperationAction(ISD::FPOWI, VT, Expand); 465 setOperationAction(ISD::FFLOOR, VT, Expand); 466 setOperationAction(ISD::FCEIL, VT, Expand); 467 setOperationAction(ISD::FTRUNC, VT, Expand); 468 setOperationAction(ISD::FRINT, VT, Expand); 469 setOperationAction(ISD::FNEARBYINT, VT, Expand); 470 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand); 471 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand); 472 setOperationAction(ISD::BUILD_VECTOR, VT, Expand); 473 setOperationAction(ISD::MULHU, VT, Expand); 474 setOperationAction(ISD::MULHS, VT, Expand); 475 setOperationAction(ISD::UMUL_LOHI, VT, Expand); 476 setOperationAction(ISD::SMUL_LOHI, VT, Expand); 477 setOperationAction(ISD::UDIVREM, VT, Expand); 478 setOperationAction(ISD::SDIVREM, VT, Expand); 479 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand); 480 setOperationAction(ISD::FPOW, VT, Expand); 481 setOperationAction(ISD::BSWAP, VT, Expand); 482 setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand); 483 setOperationAction(ISD::CTTZ, VT, Expand); 484 setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand); 485 setOperationAction(ISD::VSELECT, VT, Expand); 486 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); 487 setOperationAction(ISD::ROTL, VT, Expand); 488 setOperationAction(ISD::ROTR, VT, Expand); 489 490 for (MVT InnerVT : MVT::vector_valuetypes()) { 491 setTruncStoreAction(VT, InnerVT, Expand); 492 setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand); 493 setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand); 494 setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand); 495 } 496 } 497 498 // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle 499 // with merges, splats, etc. 500 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom); 501 502 setOperationAction(ISD::AND , MVT::v4i32, Legal); 503 setOperationAction(ISD::OR , MVT::v4i32, Legal); 504 setOperationAction(ISD::XOR , MVT::v4i32, Legal); 505 setOperationAction(ISD::LOAD , MVT::v4i32, Legal); 506 setOperationAction(ISD::SELECT, MVT::v4i32, 507 Subtarget.useCRBits() ? Legal : Expand); 508 setOperationAction(ISD::STORE , MVT::v4i32, Legal); 509 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); 510 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal); 511 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); 512 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal); 513 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); 514 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal); 515 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal); 516 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal); 517 518 addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass); 519 addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass); 520 addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass); 521 addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass); 522 523 setOperationAction(ISD::MUL, MVT::v4f32, Legal); 524 setOperationAction(ISD::FMA, MVT::v4f32, Legal); 525 526 if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) { 527 setOperationAction(ISD::FDIV, MVT::v4f32, Legal); 528 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); 529 } 530 531 if (Subtarget.hasP8Altivec()) 532 setOperationAction(ISD::MUL, MVT::v4i32, Legal); 533 else 534 setOperationAction(ISD::MUL, MVT::v4i32, Custom); 535 536 setOperationAction(ISD::MUL, MVT::v8i16, Custom); 537 setOperationAction(ISD::MUL, MVT::v16i8, Custom); 538 539 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom); 540 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom); 541 542 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom); 543 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); 544 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); 545 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 546 547 // Altivec does not contain unordered floating-point compare instructions 548 setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand); 549 setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand); 550 setCondCodeAction(ISD::SETO, MVT::v4f32, Expand); 551 setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand); 552 553 if (Subtarget.hasVSX()) { 554 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal); 555 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal); 556 if (Subtarget.hasP8Vector()) { 557 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal); 558 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal); 559 } 560 if (Subtarget.hasDirectMove()) { 561 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal); 562 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal); 563 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal); 564 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal); 565 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal); 566 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal); 567 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal); 568 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal); 569 } 570 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal); 571 572 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal); 573 setOperationAction(ISD::FCEIL, MVT::v2f64, Legal); 574 setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal); 575 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal); 576 setOperationAction(ISD::FROUND, MVT::v2f64, Legal); 577 578 setOperationAction(ISD::FROUND, MVT::v4f32, Legal); 579 580 setOperationAction(ISD::MUL, MVT::v2f64, Legal); 581 setOperationAction(ISD::FMA, MVT::v2f64, Legal); 582 583 setOperationAction(ISD::FDIV, MVT::v2f64, Legal); 584 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); 585 586 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal); 587 setOperationAction(ISD::VSELECT, MVT::v8i16, Legal); 588 setOperationAction(ISD::VSELECT, MVT::v4i32, Legal); 589 setOperationAction(ISD::VSELECT, MVT::v4f32, Legal); 590 setOperationAction(ISD::VSELECT, MVT::v2f64, Legal); 591 592 // Share the Altivec comparison restrictions. 593 setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand); 594 setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand); 595 setCondCodeAction(ISD::SETO, MVT::v2f64, Expand); 596 setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand); 597 598 setOperationAction(ISD::LOAD, MVT::v2f64, Legal); 599 setOperationAction(ISD::STORE, MVT::v2f64, Legal); 600 601 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal); 602 603 if (Subtarget.hasP8Vector()) 604 addRegisterClass(MVT::f32, &PPC::VSSRCRegClass); 605 606 addRegisterClass(MVT::f64, &PPC::VSFRCRegClass); 607 608 addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass); 609 addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass); 610 addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass); 611 612 if (Subtarget.hasP8Altivec()) { 613 setOperationAction(ISD::SHL, MVT::v2i64, Legal); 614 setOperationAction(ISD::SRA, MVT::v2i64, Legal); 615 setOperationAction(ISD::SRL, MVT::v2i64, Legal); 616 617 setOperationAction(ISD::SETCC, MVT::v2i64, Legal); 618 } 619 else { 620 setOperationAction(ISD::SHL, MVT::v2i64, Expand); 621 setOperationAction(ISD::SRA, MVT::v2i64, Expand); 622 setOperationAction(ISD::SRL, MVT::v2i64, Expand); 623 624 setOperationAction(ISD::SETCC, MVT::v2i64, Custom); 625 626 // VSX v2i64 only supports non-arithmetic operations. 627 setOperationAction(ISD::ADD, MVT::v2i64, Expand); 628 setOperationAction(ISD::SUB, MVT::v2i64, Expand); 629 } 630 631 setOperationAction(ISD::LOAD, MVT::v2i64, Promote); 632 AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64); 633 setOperationAction(ISD::STORE, MVT::v2i64, Promote); 634 AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64); 635 636 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal); 637 638 setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal); 639 setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal); 640 setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal); 641 setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal); 642 643 // Vector operation legalization checks the result type of 644 // SIGN_EXTEND_INREG, overall legalization checks the inner type. 645 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal); 646 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal); 647 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom); 648 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom); 649 650 addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass); 651 } 652 653 if (Subtarget.hasP8Altivec()) { 654 addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass); 655 addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass); 656 } 657 } 658 659 if (Subtarget.hasQPX()) { 660 setOperationAction(ISD::FADD, MVT::v4f64, Legal); 661 setOperationAction(ISD::FSUB, MVT::v4f64, Legal); 662 setOperationAction(ISD::FMUL, MVT::v4f64, Legal); 663 setOperationAction(ISD::FREM, MVT::v4f64, Expand); 664 665 setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal); 666 setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand); 667 668 setOperationAction(ISD::LOAD , MVT::v4f64, Custom); 669 setOperationAction(ISD::STORE , MVT::v4f64, Custom); 670 671 setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom); 672 setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom); 673 674 if (!Subtarget.useCRBits()) 675 setOperationAction(ISD::SELECT, MVT::v4f64, Expand); 676 setOperationAction(ISD::VSELECT, MVT::v4f64, Legal); 677 678 setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal); 679 setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand); 680 setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand); 681 setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand); 682 setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom); 683 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal); 684 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom); 685 686 setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal); 687 setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand); 688 689 setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal); 690 setOperationAction(ISD::FP_ROUND_INREG , MVT::v4f32, Expand); 691 setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal); 692 693 setOperationAction(ISD::FNEG , MVT::v4f64, Legal); 694 setOperationAction(ISD::FABS , MVT::v4f64, Legal); 695 setOperationAction(ISD::FSIN , MVT::v4f64, Expand); 696 setOperationAction(ISD::FCOS , MVT::v4f64, Expand); 697 setOperationAction(ISD::FPOWI , MVT::v4f64, Expand); 698 setOperationAction(ISD::FPOW , MVT::v4f64, Expand); 699 setOperationAction(ISD::FLOG , MVT::v4f64, Expand); 700 setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand); 701 setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand); 702 setOperationAction(ISD::FEXP , MVT::v4f64, Expand); 703 setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand); 704 705 setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal); 706 setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal); 707 708 setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal); 709 setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal); 710 711 addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass); 712 713 setOperationAction(ISD::FADD, MVT::v4f32, Legal); 714 setOperationAction(ISD::FSUB, MVT::v4f32, Legal); 715 setOperationAction(ISD::FMUL, MVT::v4f32, Legal); 716 setOperationAction(ISD::FREM, MVT::v4f32, Expand); 717 718 setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal); 719 setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand); 720 721 setOperationAction(ISD::LOAD , MVT::v4f32, Custom); 722 setOperationAction(ISD::STORE , MVT::v4f32, Custom); 723 724 if (!Subtarget.useCRBits()) 725 setOperationAction(ISD::SELECT, MVT::v4f32, Expand); 726 setOperationAction(ISD::VSELECT, MVT::v4f32, Legal); 727 728 setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal); 729 setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand); 730 setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand); 731 setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand); 732 setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom); 733 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal); 734 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); 735 736 setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal); 737 setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand); 738 739 setOperationAction(ISD::FNEG , MVT::v4f32, Legal); 740 setOperationAction(ISD::FABS , MVT::v4f32, Legal); 741 setOperationAction(ISD::FSIN , MVT::v4f32, Expand); 742 setOperationAction(ISD::FCOS , MVT::v4f32, Expand); 743 setOperationAction(ISD::FPOWI , MVT::v4f32, Expand); 744 setOperationAction(ISD::FPOW , MVT::v4f32, Expand); 745 setOperationAction(ISD::FLOG , MVT::v4f32, Expand); 746 setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand); 747 setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand); 748 setOperationAction(ISD::FEXP , MVT::v4f32, Expand); 749 setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand); 750 751 setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); 752 setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); 753 754 setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal); 755 setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal); 756 757 addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass); 758 759 setOperationAction(ISD::AND , MVT::v4i1, Legal); 760 setOperationAction(ISD::OR , MVT::v4i1, Legal); 761 setOperationAction(ISD::XOR , MVT::v4i1, Legal); 762 763 if (!Subtarget.useCRBits()) 764 setOperationAction(ISD::SELECT, MVT::v4i1, Expand); 765 setOperationAction(ISD::VSELECT, MVT::v4i1, Legal); 766 767 setOperationAction(ISD::LOAD , MVT::v4i1, Custom); 768 setOperationAction(ISD::STORE , MVT::v4i1, Custom); 769 770 setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom); 771 setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand); 772 setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand); 773 setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand); 774 setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom); 775 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand); 776 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom); 777 778 setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom); 779 setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom); 780 781 addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass); 782 783 setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal); 784 setOperationAction(ISD::FCEIL, MVT::v4f64, Legal); 785 setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal); 786 setOperationAction(ISD::FROUND, MVT::v4f64, Legal); 787 788 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); 789 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal); 790 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal); 791 setOperationAction(ISD::FROUND, MVT::v4f32, Legal); 792 793 setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand); 794 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand); 795 796 // These need to set FE_INEXACT, and so cannot be vectorized here. 797 setOperationAction(ISD::FRINT, MVT::v4f64, Expand); 798 setOperationAction(ISD::FRINT, MVT::v4f32, Expand); 799 800 if (TM.Options.UnsafeFPMath) { 801 setOperationAction(ISD::FDIV, MVT::v4f64, Legal); 802 setOperationAction(ISD::FSQRT, MVT::v4f64, Legal); 803 804 setOperationAction(ISD::FDIV, MVT::v4f32, Legal); 805 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); 806 } else { 807 setOperationAction(ISD::FDIV, MVT::v4f64, Expand); 808 setOperationAction(ISD::FSQRT, MVT::v4f64, Expand); 809 810 setOperationAction(ISD::FDIV, MVT::v4f32, Expand); 811 setOperationAction(ISD::FSQRT, MVT::v4f32, Expand); 812 } 813 } 814 815 if (Subtarget.has64BitSupport()) 816 setOperationAction(ISD::PREFETCH, MVT::Other, Legal); 817 818 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom); 819 820 if (!isPPC64) { 821 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand); 822 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand); 823 } 824 825 setBooleanContents(ZeroOrOneBooleanContent); 826 827 if (Subtarget.hasAltivec()) { 828 // Altivec instructions set fields to all zeros or all ones. 829 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); 830 } 831 832 if (!isPPC64) { 833 // These libcalls are not available in 32-bit. 834 setLibcallName(RTLIB::SHL_I128, nullptr); 835 setLibcallName(RTLIB::SRL_I128, nullptr); 836 setLibcallName(RTLIB::SRA_I128, nullptr); 837 } 838 839 setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1); 840 841 // We have target-specific dag combine patterns for the following nodes: 842 setTargetDAGCombine(ISD::SINT_TO_FP); 843 if (Subtarget.hasFPCVT()) 844 setTargetDAGCombine(ISD::UINT_TO_FP); 845 setTargetDAGCombine(ISD::LOAD); 846 setTargetDAGCombine(ISD::STORE); 847 setTargetDAGCombine(ISD::BR_CC); 848 if (Subtarget.useCRBits()) 849 setTargetDAGCombine(ISD::BRCOND); 850 setTargetDAGCombine(ISD::BSWAP); 851 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN); 852 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); 853 setTargetDAGCombine(ISD::INTRINSIC_VOID); 854 855 setTargetDAGCombine(ISD::SIGN_EXTEND); 856 setTargetDAGCombine(ISD::ZERO_EXTEND); 857 setTargetDAGCombine(ISD::ANY_EXTEND); 858 859 if (Subtarget.useCRBits()) { 860 setTargetDAGCombine(ISD::TRUNCATE); 861 setTargetDAGCombine(ISD::SETCC); 862 setTargetDAGCombine(ISD::SELECT_CC); 863 } 864 865 // Use reciprocal estimates. 866 if (TM.Options.UnsafeFPMath) { 867 setTargetDAGCombine(ISD::FDIV); 868 setTargetDAGCombine(ISD::FSQRT); 869 } 870 871 // Darwin long double math library functions have $LDBL128 appended. 872 if (Subtarget.isDarwin()) { 873 setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128"); 874 setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128"); 875 setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128"); 876 setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128"); 877 setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128"); 878 setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128"); 879 setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128"); 880 setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128"); 881 setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128"); 882 setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128"); 883 } 884 885 // With 32 condition bits, we don't need to sink (and duplicate) compares 886 // aggressively in CodeGenPrep. 887 if (Subtarget.useCRBits()) { 888 setHasMultipleConditionRegisters(); 889 setJumpIsExpensive(); 890 } 891 892 setMinFunctionAlignment(2); 893 if (Subtarget.isDarwin()) 894 setPrefFunctionAlignment(4); 895 896 switch (Subtarget.getDarwinDirective()) { 897 default: break; 898 case PPC::DIR_970: 899 case PPC::DIR_A2: 900 case PPC::DIR_E500mc: 901 case PPC::DIR_E5500: 902 case PPC::DIR_PWR4: 903 case PPC::DIR_PWR5: 904 case PPC::DIR_PWR5X: 905 case PPC::DIR_PWR6: 906 case PPC::DIR_PWR6X: 907 case PPC::DIR_PWR7: 908 case PPC::DIR_PWR8: 909 setPrefFunctionAlignment(4); 910 setPrefLoopAlignment(4); 911 break; 912 } 913 914 setInsertFencesForAtomic(true); 915 916 if (Subtarget.enableMachineScheduler()) 917 setSchedulingPreference(Sched::Source); 918 else 919 setSchedulingPreference(Sched::Hybrid); 920 921 computeRegisterProperties(STI.getRegisterInfo()); 922 923 // The Freescale cores do better with aggressive inlining of memcpy and 924 // friends. GCC uses same threshold of 128 bytes (= 32 word stores). 925 if (Subtarget.getDarwinDirective() == PPC::DIR_E500mc || 926 Subtarget.getDarwinDirective() == PPC::DIR_E5500) { 927 MaxStoresPerMemset = 32; 928 MaxStoresPerMemsetOptSize = 16; 929 MaxStoresPerMemcpy = 32; 930 MaxStoresPerMemcpyOptSize = 8; 931 MaxStoresPerMemmove = 32; 932 MaxStoresPerMemmoveOptSize = 8; 933 } else if (Subtarget.getDarwinDirective() == PPC::DIR_A2) { 934 // The A2 also benefits from (very) aggressive inlining of memcpy and 935 // friends. The overhead of a the function call, even when warm, can be 936 // over one hundred cycles. 937 MaxStoresPerMemset = 128; 938 MaxStoresPerMemcpy = 128; 939 MaxStoresPerMemmove = 128; 940 } 941 } 942 943 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine 944 /// the desired ByVal argument alignment. 945 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign, 946 unsigned MaxMaxAlign) { 947 if (MaxAlign == MaxMaxAlign) 948 return; 949 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) { 950 if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256) 951 MaxAlign = 32; 952 else if (VTy->getBitWidth() >= 128 && MaxAlign < 16) 953 MaxAlign = 16; 954 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { 955 unsigned EltAlign = 0; 956 getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign); 957 if (EltAlign > MaxAlign) 958 MaxAlign = EltAlign; 959 } else if (StructType *STy = dyn_cast<StructType>(Ty)) { 960 for (auto *EltTy : STy->elements()) { 961 unsigned EltAlign = 0; 962 getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign); 963 if (EltAlign > MaxAlign) 964 MaxAlign = EltAlign; 965 if (MaxAlign == MaxMaxAlign) 966 break; 967 } 968 } 969 } 970 971 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 972 /// function arguments in the caller parameter area. 973 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty, 974 const DataLayout &DL) const { 975 // Darwin passes everything on 4 byte boundary. 976 if (Subtarget.isDarwin()) 977 return 4; 978 979 // 16byte and wider vectors are passed on 16byte boundary. 980 // The rest is 8 on PPC64 and 4 on PPC32 boundary. 981 unsigned Align = Subtarget.isPPC64() ? 8 : 4; 982 if (Subtarget.hasAltivec() || Subtarget.hasQPX()) 983 getMaxByValAlign(Ty, Align, Subtarget.hasQPX() ? 32 : 16); 984 return Align; 985 } 986 987 bool PPCTargetLowering::useSoftFloat() const { 988 return Subtarget.useSoftFloat(); 989 } 990 991 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { 992 switch ((PPCISD::NodeType)Opcode) { 993 case PPCISD::FIRST_NUMBER: break; 994 case PPCISD::FSEL: return "PPCISD::FSEL"; 995 case PPCISD::FCFID: return "PPCISD::FCFID"; 996 case PPCISD::FCFIDU: return "PPCISD::FCFIDU"; 997 case PPCISD::FCFIDS: return "PPCISD::FCFIDS"; 998 case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS"; 999 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ"; 1000 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ"; 1001 case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ"; 1002 case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ"; 1003 case PPCISD::FRE: return "PPCISD::FRE"; 1004 case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE"; 1005 case PPCISD::STFIWX: return "PPCISD::STFIWX"; 1006 case PPCISD::VMADDFP: return "PPCISD::VMADDFP"; 1007 case PPCISD::VNMSUBFP: return "PPCISD::VNMSUBFP"; 1008 case PPCISD::VPERM: return "PPCISD::VPERM"; 1009 case PPCISD::CMPB: return "PPCISD::CMPB"; 1010 case PPCISD::Hi: return "PPCISD::Hi"; 1011 case PPCISD::Lo: return "PPCISD::Lo"; 1012 case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY"; 1013 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; 1014 case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET"; 1015 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; 1016 case PPCISD::SRL: return "PPCISD::SRL"; 1017 case PPCISD::SRA: return "PPCISD::SRA"; 1018 case PPCISD::SHL: return "PPCISD::SHL"; 1019 case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE"; 1020 case PPCISD::CALL: return "PPCISD::CALL"; 1021 case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP"; 1022 case PPCISD::MTCTR: return "PPCISD::MTCTR"; 1023 case PPCISD::BCTRL: return "PPCISD::BCTRL"; 1024 case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC"; 1025 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG"; 1026 case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE"; 1027 case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP"; 1028 case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP"; 1029 case PPCISD::MFOCRF: return "PPCISD::MFOCRF"; 1030 case PPCISD::MFVSR: return "PPCISD::MFVSR"; 1031 case PPCISD::MTVSRA: return "PPCISD::MTVSRA"; 1032 case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ"; 1033 case PPCISD::ANDIo_1_EQ_BIT: return "PPCISD::ANDIo_1_EQ_BIT"; 1034 case PPCISD::ANDIo_1_GT_BIT: return "PPCISD::ANDIo_1_GT_BIT"; 1035 case PPCISD::VCMP: return "PPCISD::VCMP"; 1036 case PPCISD::VCMPo: return "PPCISD::VCMPo"; 1037 case PPCISD::LBRX: return "PPCISD::LBRX"; 1038 case PPCISD::STBRX: return "PPCISD::STBRX"; 1039 case PPCISD::LFIWAX: return "PPCISD::LFIWAX"; 1040 case PPCISD::LFIWZX: return "PPCISD::LFIWZX"; 1041 case PPCISD::LXVD2X: return "PPCISD::LXVD2X"; 1042 case PPCISD::STXVD2X: return "PPCISD::STXVD2X"; 1043 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH"; 1044 case PPCISD::BDNZ: return "PPCISD::BDNZ"; 1045 case PPCISD::BDZ: return "PPCISD::BDZ"; 1046 case PPCISD::MFFS: return "PPCISD::MFFS"; 1047 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; 1048 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; 1049 case PPCISD::CR6SET: return "PPCISD::CR6SET"; 1050 case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; 1051 case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT"; 1052 case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT"; 1053 case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA"; 1054 case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L"; 1055 case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS"; 1056 case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA"; 1057 case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L"; 1058 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR"; 1059 case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR"; 1060 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA"; 1061 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L"; 1062 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR"; 1063 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR"; 1064 case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA"; 1065 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L"; 1066 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT"; 1067 case PPCISD::SC: return "PPCISD::SC"; 1068 case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB"; 1069 case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE"; 1070 case PPCISD::RFEBB: return "PPCISD::RFEBB"; 1071 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD"; 1072 case PPCISD::QVFPERM: return "PPCISD::QVFPERM"; 1073 case PPCISD::QVGPCI: return "PPCISD::QVGPCI"; 1074 case PPCISD::QVALIGNI: return "PPCISD::QVALIGNI"; 1075 case PPCISD::QVESPLATI: return "PPCISD::QVESPLATI"; 1076 case PPCISD::QBFLT: return "PPCISD::QBFLT"; 1077 case PPCISD::QVLFSb: return "PPCISD::QVLFSb"; 1078 } 1079 return nullptr; 1080 } 1081 1082 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C, 1083 EVT VT) const { 1084 if (!VT.isVector()) 1085 return Subtarget.useCRBits() ? MVT::i1 : MVT::i32; 1086 1087 if (Subtarget.hasQPX()) 1088 return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements()); 1089 1090 return VT.changeVectorElementTypeToInteger(); 1091 } 1092 1093 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const { 1094 assert(VT.isFloatingPoint() && "Non-floating-point FMA?"); 1095 return true; 1096 } 1097 1098 //===----------------------------------------------------------------------===// 1099 // Node matching predicates, for use by the tblgen matching code. 1100 //===----------------------------------------------------------------------===// 1101 1102 /// isFloatingPointZero - Return true if this is 0.0 or -0.0. 1103 static bool isFloatingPointZero(SDValue Op) { 1104 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 1105 return CFP->getValueAPF().isZero(); 1106 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 1107 // Maybe this has already been legalized into the constant pool? 1108 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1))) 1109 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 1110 return CFP->getValueAPF().isZero(); 1111 } 1112 return false; 1113 } 1114 1115 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return 1116 /// true if Op is undef or if it matches the specified value. 1117 static bool isConstantOrUndef(int Op, int Val) { 1118 return Op < 0 || Op == Val; 1119 } 1120 1121 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a 1122 /// VPKUHUM instruction. 1123 /// The ShuffleKind distinguishes between big-endian operations with 1124 /// two different inputs (0), either-endian operations with two identical 1125 /// inputs (1), and little-endian operations with two different inputs (2). 1126 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 1127 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1128 SelectionDAG &DAG) { 1129 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1130 if (ShuffleKind == 0) { 1131 if (IsLE) 1132 return false; 1133 for (unsigned i = 0; i != 16; ++i) 1134 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1)) 1135 return false; 1136 } else if (ShuffleKind == 2) { 1137 if (!IsLE) 1138 return false; 1139 for (unsigned i = 0; i != 16; ++i) 1140 if (!isConstantOrUndef(N->getMaskElt(i), i*2)) 1141 return false; 1142 } else if (ShuffleKind == 1) { 1143 unsigned j = IsLE ? 0 : 1; 1144 for (unsigned i = 0; i != 8; ++i) 1145 if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) || 1146 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)) 1147 return false; 1148 } 1149 return true; 1150 } 1151 1152 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a 1153 /// VPKUWUM instruction. 1154 /// The ShuffleKind distinguishes between big-endian operations with 1155 /// two different inputs (0), either-endian operations with two identical 1156 /// inputs (1), and little-endian operations with two different inputs (2). 1157 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 1158 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1159 SelectionDAG &DAG) { 1160 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1161 if (ShuffleKind == 0) { 1162 if (IsLE) 1163 return false; 1164 for (unsigned i = 0; i != 16; i += 2) 1165 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) || 1166 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3)) 1167 return false; 1168 } else if (ShuffleKind == 2) { 1169 if (!IsLE) 1170 return false; 1171 for (unsigned i = 0; i != 16; i += 2) 1172 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1173 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1)) 1174 return false; 1175 } else if (ShuffleKind == 1) { 1176 unsigned j = IsLE ? 0 : 2; 1177 for (unsigned i = 0; i != 8; i += 2) 1178 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1179 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1180 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1181 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1)) 1182 return false; 1183 } 1184 return true; 1185 } 1186 1187 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a 1188 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the 1189 /// current subtarget. 1190 /// 1191 /// The ShuffleKind distinguishes between big-endian operations with 1192 /// two different inputs (0), either-endian operations with two identical 1193 /// inputs (1), and little-endian operations with two different inputs (2). 1194 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td). 1195 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, 1196 SelectionDAG &DAG) { 1197 const PPCSubtarget& Subtarget = 1198 static_cast<const PPCSubtarget&>(DAG.getSubtarget()); 1199 if (!Subtarget.hasP8Vector()) 1200 return false; 1201 1202 bool IsLE = DAG.getDataLayout().isLittleEndian(); 1203 if (ShuffleKind == 0) { 1204 if (IsLE) 1205 return false; 1206 for (unsigned i = 0; i != 16; i += 4) 1207 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) || 1208 !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) || 1209 !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) || 1210 !isConstantOrUndef(N->getMaskElt(i+3), i*2+7)) 1211 return false; 1212 } else if (ShuffleKind == 2) { 1213 if (!IsLE) 1214 return false; 1215 for (unsigned i = 0; i != 16; i += 4) 1216 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) || 1217 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) || 1218 !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) || 1219 !isConstantOrUndef(N->getMaskElt(i+3), i*2+3)) 1220 return false; 1221 } else if (ShuffleKind == 1) { 1222 unsigned j = IsLE ? 0 : 4; 1223 for (unsigned i = 0; i != 8; i += 4) 1224 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) || 1225 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) || 1226 !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) || 1227 !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) || 1228 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) || 1229 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) || 1230 !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) || 1231 !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3)) 1232 return false; 1233 } 1234 return true; 1235 } 1236 1237 /// isVMerge - Common function, used to match vmrg* shuffles. 1238 /// 1239 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, 1240 unsigned LHSStart, unsigned RHSStart) { 1241 if (N->getValueType(0) != MVT::v16i8) 1242 return false; 1243 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) && 1244 "Unsupported merge size!"); 1245 1246 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units 1247 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit 1248 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j), 1249 LHSStart+j+i*UnitSize) || 1250 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j), 1251 RHSStart+j+i*UnitSize)) 1252 return false; 1253 } 1254 return true; 1255 } 1256 1257 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for 1258 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes). 1259 /// The ShuffleKind distinguishes between big-endian merges with two 1260 /// different inputs (0), either-endian merges with two identical inputs (1), 1261 /// and little-endian merges with two different inputs (2). For the latter, 1262 /// the input operands are swapped (see PPCInstrAltivec.td). 1263 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 1264 unsigned ShuffleKind, SelectionDAG &DAG) { 1265 if (DAG.getDataLayout().isLittleEndian()) { 1266 if (ShuffleKind == 1) // unary 1267 return isVMerge(N, UnitSize, 0, 0); 1268 else if (ShuffleKind == 2) // swapped 1269 return isVMerge(N, UnitSize, 0, 16); 1270 else 1271 return false; 1272 } else { 1273 if (ShuffleKind == 1) // unary 1274 return isVMerge(N, UnitSize, 8, 8); 1275 else if (ShuffleKind == 0) // normal 1276 return isVMerge(N, UnitSize, 8, 24); 1277 else 1278 return false; 1279 } 1280 } 1281 1282 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for 1283 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes). 1284 /// The ShuffleKind distinguishes between big-endian merges with two 1285 /// different inputs (0), either-endian merges with two identical inputs (1), 1286 /// and little-endian merges with two different inputs (2). For the latter, 1287 /// the input operands are swapped (see PPCInstrAltivec.td). 1288 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, 1289 unsigned ShuffleKind, SelectionDAG &DAG) { 1290 if (DAG.getDataLayout().isLittleEndian()) { 1291 if (ShuffleKind == 1) // unary 1292 return isVMerge(N, UnitSize, 8, 8); 1293 else if (ShuffleKind == 2) // swapped 1294 return isVMerge(N, UnitSize, 8, 24); 1295 else 1296 return false; 1297 } else { 1298 if (ShuffleKind == 1) // unary 1299 return isVMerge(N, UnitSize, 0, 0); 1300 else if (ShuffleKind == 0) // normal 1301 return isVMerge(N, UnitSize, 0, 16); 1302 else 1303 return false; 1304 } 1305 } 1306 1307 /** 1308 * \brief Common function used to match vmrgew and vmrgow shuffles 1309 * 1310 * The indexOffset determines whether to look for even or odd words in 1311 * the shuffle mask. This is based on the of the endianness of the target 1312 * machine. 1313 * - Little Endian: 1314 * - Use offset of 0 to check for odd elements 1315 * - Use offset of 4 to check for even elements 1316 * - Big Endian: 1317 * - Use offset of 0 to check for even elements 1318 * - Use offset of 4 to check for odd elements 1319 * A detailed description of the vector element ordering for little endian and 1320 * big endian can be found at 1321 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html 1322 * Targeting your applications - what little endian and big endian IBM XL C/C++ 1323 * compiler differences mean to you 1324 * 1325 * The mask to the shuffle vector instruction specifies the indices of the 1326 * elements from the two input vectors to place in the result. The elements are 1327 * numbered in array-access order, starting with the first vector. These vectors 1328 * are always of type v16i8, thus each vector will contain 16 elements of size 1329 * 8. More info on the shuffle vector can be found in the 1330 * http://llvm.org/docs/LangRef.html#shufflevector-instruction 1331 * Language Reference. 1332 * 1333 * The RHSStartValue indicates whether the same input vectors are used (unary) 1334 * or two different input vectors are used, based on the following: 1335 * - If the instruction uses the same vector for both inputs, the range of the 1336 * indices will be 0 to 15. In this case, the RHSStart value passed should 1337 * be 0. 1338 * - If the instruction has two different vectors then the range of the 1339 * indices will be 0 to 31. In this case, the RHSStart value passed should 1340 * be 16 (indices 0-15 specify elements in the first vector while indices 16 1341 * to 31 specify elements in the second vector). 1342 * 1343 * \param[in] N The shuffle vector SD Node to analyze 1344 * \param[in] IndexOffset Specifies whether to look for even or odd elements 1345 * \param[in] RHSStartValue Specifies the starting index for the righthand input 1346 * vector to the shuffle_vector instruction 1347 * \return true iff this shuffle vector represents an even or odd word merge 1348 */ 1349 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset, 1350 unsigned RHSStartValue) { 1351 if (N->getValueType(0) != MVT::v16i8) 1352 return false; 1353 1354 for (unsigned i = 0; i < 2; ++i) 1355 for (unsigned j = 0; j < 4; ++j) 1356 if (!isConstantOrUndef(N->getMaskElt(i*4+j), 1357 i*RHSStartValue+j+IndexOffset) || 1358 !isConstantOrUndef(N->getMaskElt(i*4+j+8), 1359 i*RHSStartValue+j+IndexOffset+8)) 1360 return false; 1361 return true; 1362 } 1363 1364 /** 1365 * \brief Determine if the specified shuffle mask is suitable for the vmrgew or 1366 * vmrgow instructions. 1367 * 1368 * \param[in] N The shuffle vector SD Node to analyze 1369 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false) 1370 * \param[in] ShuffleKind Identify the type of merge: 1371 * - 0 = big-endian merge with two different inputs; 1372 * - 1 = either-endian merge with two identical inputs; 1373 * - 2 = little-endian merge with two different inputs (inputs are swapped for 1374 * little-endian merges). 1375 * \param[in] DAG The current SelectionDAG 1376 * \return true iff this shuffle mask 1377 */ 1378 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, 1379 unsigned ShuffleKind, SelectionDAG &DAG) { 1380 if (DAG.getDataLayout().isLittleEndian()) { 1381 unsigned indexOffset = CheckEven ? 4 : 0; 1382 if (ShuffleKind == 1) // Unary 1383 return isVMerge(N, indexOffset, 0); 1384 else if (ShuffleKind == 2) // swapped 1385 return isVMerge(N, indexOffset, 16); 1386 else 1387 return false; 1388 } 1389 else { 1390 unsigned indexOffset = CheckEven ? 0 : 4; 1391 if (ShuffleKind == 1) // Unary 1392 return isVMerge(N, indexOffset, 0); 1393 else if (ShuffleKind == 0) // Normal 1394 return isVMerge(N, indexOffset, 16); 1395 else 1396 return false; 1397 } 1398 return false; 1399 } 1400 1401 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift 1402 /// amount, otherwise return -1. 1403 /// The ShuffleKind distinguishes between big-endian operations with two 1404 /// different inputs (0), either-endian operations with two identical inputs 1405 /// (1), and little-endian operations with two different inputs (2). For the 1406 /// latter, the input operands are swapped (see PPCInstrAltivec.td). 1407 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, 1408 SelectionDAG &DAG) { 1409 if (N->getValueType(0) != MVT::v16i8) 1410 return -1; 1411 1412 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1413 1414 // Find the first non-undef value in the shuffle mask. 1415 unsigned i; 1416 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) 1417 /*search*/; 1418 1419 if (i == 16) return -1; // all undef. 1420 1421 // Otherwise, check to see if the rest of the elements are consecutively 1422 // numbered from this value. 1423 unsigned ShiftAmt = SVOp->getMaskElt(i); 1424 if (ShiftAmt < i) return -1; 1425 1426 ShiftAmt -= i; 1427 bool isLE = DAG.getDataLayout().isLittleEndian(); 1428 1429 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) { 1430 // Check the rest of the elements to see if they are consecutive. 1431 for (++i; i != 16; ++i) 1432 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1433 return -1; 1434 } else if (ShuffleKind == 1) { 1435 // Check the rest of the elements to see if they are consecutive. 1436 for (++i; i != 16; ++i) 1437 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15)) 1438 return -1; 1439 } else 1440 return -1; 1441 1442 if (isLE) 1443 ShiftAmt = 16 - ShiftAmt; 1444 1445 return ShiftAmt; 1446 } 1447 1448 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand 1449 /// specifies a splat of a single element that is suitable for input to 1450 /// VSPLTB/VSPLTH/VSPLTW. 1451 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) { 1452 assert(N->getValueType(0) == MVT::v16i8 && 1453 (EltSize == 1 || EltSize == 2 || EltSize == 4)); 1454 1455 // The consecutive indices need to specify an element, not part of two 1456 // different elements. So abandon ship early if this isn't the case. 1457 if (N->getMaskElt(0) % EltSize != 0) 1458 return false; 1459 1460 // This is a splat operation if each element of the permute is the same, and 1461 // if the value doesn't reference the second vector. 1462 unsigned ElementBase = N->getMaskElt(0); 1463 1464 // FIXME: Handle UNDEF elements too! 1465 if (ElementBase >= 16) 1466 return false; 1467 1468 // Check that the indices are consecutive, in the case of a multi-byte element 1469 // splatted with a v16i8 mask. 1470 for (unsigned i = 1; i != EltSize; ++i) 1471 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase)) 1472 return false; 1473 1474 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) { 1475 if (N->getMaskElt(i) < 0) continue; 1476 for (unsigned j = 0; j != EltSize; ++j) 1477 if (N->getMaskElt(i+j) != N->getMaskElt(j)) 1478 return false; 1479 } 1480 return true; 1481 } 1482 1483 /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the 1484 /// specified isSplatShuffleMask VECTOR_SHUFFLE mask. 1485 unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize, 1486 SelectionDAG &DAG) { 1487 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1488 assert(isSplatShuffleMask(SVOp, EltSize)); 1489 if (DAG.getDataLayout().isLittleEndian()) 1490 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize); 1491 else 1492 return SVOp->getMaskElt(0) / EltSize; 1493 } 1494 1495 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed 1496 /// by using a vspltis[bhw] instruction of the specified element size, return 1497 /// the constant being splatted. The ByteSize field indicates the number of 1498 /// bytes of each element [124] -> [bhw]. 1499 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { 1500 SDValue OpVal(nullptr, 0); 1501 1502 // If ByteSize of the splat is bigger than the element size of the 1503 // build_vector, then we have a case where we are checking for a splat where 1504 // multiple elements of the buildvector are folded together into a single 1505 // logical element of the splat (e.g. "vsplish 1" to splat {0,1}*8). 1506 unsigned EltSize = 16/N->getNumOperands(); 1507 if (EltSize < ByteSize) { 1508 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. 1509 SDValue UniquedVals[4]; 1510 assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); 1511 1512 // See if all of the elements in the buildvector agree across. 1513 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1514 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1515 // If the element isn't a constant, bail fully out. 1516 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); 1517 1518 1519 if (!UniquedVals[i&(Multiple-1)].getNode()) 1520 UniquedVals[i&(Multiple-1)] = N->getOperand(i); 1521 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) 1522 return SDValue(); // no match. 1523 } 1524 1525 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains 1526 // either constant or undef values that are identical for each chunk. See 1527 // if these chunks can form into a larger vspltis*. 1528 1529 // Check to see if all of the leading entries are either 0 or -1. If 1530 // neither, then this won't fit into the immediate field. 1531 bool LeadingZero = true; 1532 bool LeadingOnes = true; 1533 for (unsigned i = 0; i != Multiple-1; ++i) { 1534 if (!UniquedVals[i].getNode()) continue; // Must have been undefs. 1535 1536 LeadingZero &= isNullConstant(UniquedVals[i]); 1537 LeadingOnes &= isAllOnesConstant(UniquedVals[i]); 1538 } 1539 // Finally, check the least significant entry. 1540 if (LeadingZero) { 1541 if (!UniquedVals[Multiple-1].getNode()) 1542 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef 1543 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue(); 1544 if (Val < 16) // 0,0,0,4 -> vspltisw(4) 1545 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 1546 } 1547 if (LeadingOnes) { 1548 if (!UniquedVals[Multiple-1].getNode()) 1549 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef 1550 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue(); 1551 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2) 1552 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32); 1553 } 1554 1555 return SDValue(); 1556 } 1557 1558 // Check to see if this buildvec has a single non-undef value in its elements. 1559 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 1560 if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; 1561 if (!OpVal.getNode()) 1562 OpVal = N->getOperand(i); 1563 else if (OpVal != N->getOperand(i)) 1564 return SDValue(); 1565 } 1566 1567 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def. 1568 1569 unsigned ValSizeInBytes = EltSize; 1570 uint64_t Value = 0; 1571 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) { 1572 Value = CN->getZExtValue(); 1573 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) { 1574 assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!"); 1575 Value = FloatToBits(CN->getValueAPF().convertToFloat()); 1576 } 1577 1578 // If the splat value is larger than the element value, then we can never do 1579 // this splat. The only case that we could fit the replicated bits into our 1580 // immediate field for would be zero, and we prefer to use vxor for it. 1581 if (ValSizeInBytes < ByteSize) return SDValue(); 1582 1583 // If the element value is larger than the splat value, check if it consists 1584 // of a repeated bit pattern of size ByteSize. 1585 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8)) 1586 return SDValue(); 1587 1588 // Properly sign extend the value. 1589 int MaskVal = SignExtend32(Value, ByteSize * 8); 1590 1591 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 1592 if (MaskVal == 0) return SDValue(); 1593 1594 // Finally, if this value fits in a 5 bit sext field, return it 1595 if (SignExtend32<5>(MaskVal) == MaskVal) 1596 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32); 1597 return SDValue(); 1598 } 1599 1600 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift 1601 /// amount, otherwise return -1. 1602 int PPC::isQVALIGNIShuffleMask(SDNode *N) { 1603 EVT VT = N->getValueType(0); 1604 if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1) 1605 return -1; 1606 1607 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); 1608 1609 // Find the first non-undef value in the shuffle mask. 1610 unsigned i; 1611 for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i) 1612 /*search*/; 1613 1614 if (i == 4) return -1; // all undef. 1615 1616 // Otherwise, check to see if the rest of the elements are consecutively 1617 // numbered from this value. 1618 unsigned ShiftAmt = SVOp->getMaskElt(i); 1619 if (ShiftAmt < i) return -1; 1620 ShiftAmt -= i; 1621 1622 // Check the rest of the elements to see if they are consecutive. 1623 for (++i; i != 4; ++i) 1624 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i)) 1625 return -1; 1626 1627 return ShiftAmt; 1628 } 1629 1630 //===----------------------------------------------------------------------===// 1631 // Addressing Mode Selection 1632 //===----------------------------------------------------------------------===// 1633 1634 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit 1635 /// or 64-bit immediate, and if the value can be accurately represented as a 1636 /// sign extension from a 16-bit value. If so, this returns true and the 1637 /// immediate. 1638 static bool isIntS16Immediate(SDNode *N, short &Imm) { 1639 if (!isa<ConstantSDNode>(N)) 1640 return false; 1641 1642 Imm = (short)cast<ConstantSDNode>(N)->getZExtValue(); 1643 if (N->getValueType(0) == MVT::i32) 1644 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue(); 1645 else 1646 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue(); 1647 } 1648 static bool isIntS16Immediate(SDValue Op, short &Imm) { 1649 return isIntS16Immediate(Op.getNode(), Imm); 1650 } 1651 1652 /// SelectAddressRegReg - Given the specified addressed, check to see if it 1653 /// can be represented as an indexed [r+r] operation. Returns false if it 1654 /// can be more efficiently represented with [r+imm]. 1655 bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, 1656 SDValue &Index, 1657 SelectionDAG &DAG) const { 1658 short imm = 0; 1659 if (N.getOpcode() == ISD::ADD) { 1660 if (isIntS16Immediate(N.getOperand(1), imm)) 1661 return false; // r+i 1662 if (N.getOperand(1).getOpcode() == PPCISD::Lo) 1663 return false; // r+i 1664 1665 Base = N.getOperand(0); 1666 Index = N.getOperand(1); 1667 return true; 1668 } else if (N.getOpcode() == ISD::OR) { 1669 if (isIntS16Immediate(N.getOperand(1), imm)) 1670 return false; // r+i can fold it if we can. 1671 1672 // If this is an or of disjoint bitfields, we can codegen this as an add 1673 // (for better address arithmetic) if the LHS and RHS of the OR are provably 1674 // disjoint. 1675 APInt LHSKnownZero, LHSKnownOne; 1676 APInt RHSKnownZero, RHSKnownOne; 1677 DAG.computeKnownBits(N.getOperand(0), 1678 LHSKnownZero, LHSKnownOne); 1679 1680 if (LHSKnownZero.getBoolValue()) { 1681 DAG.computeKnownBits(N.getOperand(1), 1682 RHSKnownZero, RHSKnownOne); 1683 // If all of the bits are known zero on the LHS or RHS, the add won't 1684 // carry. 1685 if (~(LHSKnownZero | RHSKnownZero) == 0) { 1686 Base = N.getOperand(0); 1687 Index = N.getOperand(1); 1688 return true; 1689 } 1690 } 1691 } 1692 1693 return false; 1694 } 1695 1696 // If we happen to be doing an i64 load or store into a stack slot that has 1697 // less than a 4-byte alignment, then the frame-index elimination may need to 1698 // use an indexed load or store instruction (because the offset may not be a 1699 // multiple of 4). The extra register needed to hold the offset comes from the 1700 // register scavenger, and it is possible that the scavenger will need to use 1701 // an emergency spill slot. As a result, we need to make sure that a spill slot 1702 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned 1703 // stack slot. 1704 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) { 1705 // FIXME: This does not handle the LWA case. 1706 if (VT != MVT::i64) 1707 return; 1708 1709 // NOTE: We'll exclude negative FIs here, which come from argument 1710 // lowering, because there are no known test cases triggering this problem 1711 // using packed structures (or similar). We can remove this exclusion if 1712 // we find such a test case. The reason why this is so test-case driven is 1713 // because this entire 'fixup' is only to prevent crashes (from the 1714 // register scavenger) on not-really-valid inputs. For example, if we have: 1715 // %a = alloca i1 1716 // %b = bitcast i1* %a to i64* 1717 // store i64* a, i64 b 1718 // then the store should really be marked as 'align 1', but is not. If it 1719 // were marked as 'align 1' then the indexed form would have been 1720 // instruction-selected initially, and the problem this 'fixup' is preventing 1721 // won't happen regardless. 1722 if (FrameIdx < 0) 1723 return; 1724 1725 MachineFunction &MF = DAG.getMachineFunction(); 1726 MachineFrameInfo *MFI = MF.getFrameInfo(); 1727 1728 unsigned Align = MFI->getObjectAlignment(FrameIdx); 1729 if (Align >= 4) 1730 return; 1731 1732 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 1733 FuncInfo->setHasNonRISpills(); 1734 } 1735 1736 /// Returns true if the address N can be represented by a base register plus 1737 /// a signed 16-bit displacement [r+imm], and if it is not better 1738 /// represented as reg+reg. If Aligned is true, only accept displacements 1739 /// suitable for STD and friends, i.e. multiples of 4. 1740 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, 1741 SDValue &Base, 1742 SelectionDAG &DAG, 1743 bool Aligned) const { 1744 // FIXME dl should come from parent load or store, not from address 1745 SDLoc dl(N); 1746 // If this can be more profitably realized as r+r, fail. 1747 if (SelectAddressRegReg(N, Disp, Base, DAG)) 1748 return false; 1749 1750 if (N.getOpcode() == ISD::ADD) { 1751 short imm = 0; 1752 if (isIntS16Immediate(N.getOperand(1), imm) && 1753 (!Aligned || (imm & 3) == 0)) { 1754 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1755 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1756 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1757 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1758 } else { 1759 Base = N.getOperand(0); 1760 } 1761 return true; // [r+i] 1762 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) { 1763 // Match LOAD (ADD (X, Lo(G))). 1764 assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue() 1765 && "Cannot handle constant offsets yet!"); 1766 Disp = N.getOperand(1).getOperand(0); // The global address. 1767 assert(Disp.getOpcode() == ISD::TargetGlobalAddress || 1768 Disp.getOpcode() == ISD::TargetGlobalTLSAddress || 1769 Disp.getOpcode() == ISD::TargetConstantPool || 1770 Disp.getOpcode() == ISD::TargetJumpTable); 1771 Base = N.getOperand(0); 1772 return true; // [&g+r] 1773 } 1774 } else if (N.getOpcode() == ISD::OR) { 1775 short imm = 0; 1776 if (isIntS16Immediate(N.getOperand(1), imm) && 1777 (!Aligned || (imm & 3) == 0)) { 1778 // If this is an or of disjoint bitfields, we can codegen this as an add 1779 // (for better address arithmetic) if the LHS and RHS of the OR are 1780 // provably disjoint. 1781 APInt LHSKnownZero, LHSKnownOne; 1782 DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne); 1783 1784 if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) { 1785 // If all of the bits are known zero on the LHS or RHS, the add won't 1786 // carry. 1787 if (FrameIndexSDNode *FI = 1788 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) { 1789 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1790 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1791 } else { 1792 Base = N.getOperand(0); 1793 } 1794 Disp = DAG.getTargetConstant(imm, dl, N.getValueType()); 1795 return true; 1796 } 1797 } 1798 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) { 1799 // Loading from a constant address. 1800 1801 // If this address fits entirely in a 16-bit sext immediate field, codegen 1802 // this as "d, 0" 1803 short Imm; 1804 if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) { 1805 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0)); 1806 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1807 CN->getValueType(0)); 1808 return true; 1809 } 1810 1811 // Handle 32-bit sext immediates with LIS + addr mode. 1812 if ((CN->getValueType(0) == MVT::i32 || 1813 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) && 1814 (!Aligned || (CN->getZExtValue() & 3) == 0)) { 1815 int Addr = (int)CN->getZExtValue(); 1816 1817 // Otherwise, break this down into an LIS + disp. 1818 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32); 1819 1820 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl, 1821 MVT::i32); 1822 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; 1823 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0); 1824 return true; 1825 } 1826 } 1827 1828 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout())); 1829 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) { 1830 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType()); 1831 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType()); 1832 } else 1833 Base = N; 1834 return true; // [r+0] 1835 } 1836 1837 /// SelectAddressRegRegOnly - Given the specified addressed, force it to be 1838 /// represented as an indexed [r+r] operation. 1839 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, 1840 SDValue &Index, 1841 SelectionDAG &DAG) const { 1842 // Check to see if we can easily represent this as an [r+r] address. This 1843 // will fail if it thinks that the address is more profitably represented as 1844 // reg+imm, e.g. where imm = 0. 1845 if (SelectAddressRegReg(N, Base, Index, DAG)) 1846 return true; 1847 1848 // If the operand is an addition, always emit this as [r+r], since this is 1849 // better (for code size, and execution, as the memop does the add for free) 1850 // than emitting an explicit add. 1851 if (N.getOpcode() == ISD::ADD) { 1852 Base = N.getOperand(0); 1853 Index = N.getOperand(1); 1854 return true; 1855 } 1856 1857 // Otherwise, do it the hard way, using R0 as the base register. 1858 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO, 1859 N.getValueType()); 1860 Index = N; 1861 return true; 1862 } 1863 1864 /// getPreIndexedAddressParts - returns true by value, base pointer and 1865 /// offset pointer and addressing mode by reference if the node's address 1866 /// can be legally represented as pre-indexed load / store address. 1867 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 1868 SDValue &Offset, 1869 ISD::MemIndexedMode &AM, 1870 SelectionDAG &DAG) const { 1871 if (DisablePPCPreinc) return false; 1872 1873 bool isLoad = true; 1874 SDValue Ptr; 1875 EVT VT; 1876 unsigned Alignment; 1877 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1878 Ptr = LD->getBasePtr(); 1879 VT = LD->getMemoryVT(); 1880 Alignment = LD->getAlignment(); 1881 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 1882 Ptr = ST->getBasePtr(); 1883 VT = ST->getMemoryVT(); 1884 Alignment = ST->getAlignment(); 1885 isLoad = false; 1886 } else 1887 return false; 1888 1889 // PowerPC doesn't have preinc load/store instructions for vectors (except 1890 // for QPX, which does have preinc r+r forms). 1891 if (VT.isVector()) { 1892 if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) { 1893 return false; 1894 } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) { 1895 AM = ISD::PRE_INC; 1896 return true; 1897 } 1898 } 1899 1900 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) { 1901 1902 // Common code will reject creating a pre-inc form if the base pointer 1903 // is a frame index, or if N is a store and the base pointer is either 1904 // the same as or a predecessor of the value being stored. Check for 1905 // those situations here, and try with swapped Base/Offset instead. 1906 bool Swap = false; 1907 1908 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base)) 1909 Swap = true; 1910 else if (!isLoad) { 1911 SDValue Val = cast<StoreSDNode>(N)->getValue(); 1912 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode())) 1913 Swap = true; 1914 } 1915 1916 if (Swap) 1917 std::swap(Base, Offset); 1918 1919 AM = ISD::PRE_INC; 1920 return true; 1921 } 1922 1923 // LDU/STU can only handle immediates that are a multiple of 4. 1924 if (VT != MVT::i64) { 1925 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false)) 1926 return false; 1927 } else { 1928 // LDU/STU need an address with at least 4-byte alignment. 1929 if (Alignment < 4) 1930 return false; 1931 1932 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true)) 1933 return false; 1934 } 1935 1936 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 1937 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of 1938 // sext i32 to i64 when addr mode is r+i. 1939 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 && 1940 LD->getExtensionType() == ISD::SEXTLOAD && 1941 isa<ConstantSDNode>(Offset)) 1942 return false; 1943 } 1944 1945 AM = ISD::PRE_INC; 1946 return true; 1947 } 1948 1949 //===----------------------------------------------------------------------===// 1950 // LowerOperation implementation 1951 //===----------------------------------------------------------------------===// 1952 1953 /// GetLabelAccessInfo - Return true if we should reference labels using a 1954 /// PICBase, set the HiOpFlags and LoOpFlags to the target MO flags. 1955 static bool GetLabelAccessInfo(const TargetMachine &TM, 1956 const PPCSubtarget &Subtarget, 1957 unsigned &HiOpFlags, unsigned &LoOpFlags, 1958 const GlobalValue *GV = nullptr) { 1959 HiOpFlags = PPCII::MO_HA; 1960 LoOpFlags = PPCII::MO_LO; 1961 1962 // Don't use the pic base if not in PIC relocation model. 1963 bool isPIC = TM.getRelocationModel() == Reloc::PIC_; 1964 1965 if (isPIC) { 1966 HiOpFlags |= PPCII::MO_PIC_FLAG; 1967 LoOpFlags |= PPCII::MO_PIC_FLAG; 1968 } 1969 1970 // If this is a reference to a global value that requires a non-lazy-ptr, make 1971 // sure that instruction lowering adds it. 1972 if (GV && Subtarget.hasLazyResolverStub(GV)) { 1973 HiOpFlags |= PPCII::MO_NLP_FLAG; 1974 LoOpFlags |= PPCII::MO_NLP_FLAG; 1975 1976 if (GV->hasHiddenVisibility()) { 1977 HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1978 LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; 1979 } 1980 } 1981 1982 return isPIC; 1983 } 1984 1985 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, 1986 SelectionDAG &DAG) { 1987 SDLoc DL(HiPart); 1988 EVT PtrVT = HiPart.getValueType(); 1989 SDValue Zero = DAG.getConstant(0, DL, PtrVT); 1990 1991 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); 1992 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); 1993 1994 // With PIC, the first instruction is actually "GR+hi(&G)". 1995 if (isPIC) 1996 Hi = DAG.getNode(ISD::ADD, DL, PtrVT, 1997 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); 1998 1999 // Generate non-pic code that has direct accesses to the constant pool. 2000 // The address of the global is just (hi(&g)+lo(&g)). 2001 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); 2002 } 2003 2004 static void setUsesTOCBasePtr(MachineFunction &MF) { 2005 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2006 FuncInfo->setUsesTOCBasePtr(); 2007 } 2008 2009 static void setUsesTOCBasePtr(SelectionDAG &DAG) { 2010 setUsesTOCBasePtr(DAG.getMachineFunction()); 2011 } 2012 2013 static SDValue getTOCEntry(SelectionDAG &DAG, SDLoc dl, bool Is64Bit, 2014 SDValue GA) { 2015 EVT VT = Is64Bit ? MVT::i64 : MVT::i32; 2016 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT) : 2017 DAG.getNode(PPCISD::GlobalBaseReg, dl, VT); 2018 2019 SDValue Ops[] = { GA, Reg }; 2020 return DAG.getMemIntrinsicNode( 2021 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT, 2022 MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0, false, true, 2023 false, 0); 2024 } 2025 2026 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, 2027 SelectionDAG &DAG) const { 2028 EVT PtrVT = Op.getValueType(); 2029 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2030 const Constant *C = CP->getConstVal(); 2031 2032 // 64-bit SVR4 ABI code is always position-independent. 2033 // The actual address of the GlobalValue is stored in the TOC. 2034 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2035 setUsesTOCBasePtr(DAG); 2036 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); 2037 return getTOCEntry(DAG, SDLoc(CP), true, GA); 2038 } 2039 2040 unsigned MOHiFlag, MOLoFlag; 2041 bool isPIC = 2042 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2043 2044 if (isPIC && Subtarget.isSVR4ABI()) { 2045 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 2046 PPCII::MO_PIC_FLAG); 2047 return getTOCEntry(DAG, SDLoc(CP), false, GA); 2048 } 2049 2050 SDValue CPIHi = 2051 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag); 2052 SDValue CPILo = 2053 DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag); 2054 return LowerLabelRef(CPIHi, CPILo, isPIC, DAG); 2055 } 2056 2057 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2058 EVT PtrVT = Op.getValueType(); 2059 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2060 2061 // 64-bit SVR4 ABI code is always position-independent. 2062 // The actual address of the GlobalValue is stored in the TOC. 2063 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2064 setUsesTOCBasePtr(DAG); 2065 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2066 return getTOCEntry(DAG, SDLoc(JT), true, GA); 2067 } 2068 2069 unsigned MOHiFlag, MOLoFlag; 2070 bool isPIC = 2071 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2072 2073 if (isPIC && Subtarget.isSVR4ABI()) { 2074 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2075 PPCII::MO_PIC_FLAG); 2076 return getTOCEntry(DAG, SDLoc(GA), false, GA); 2077 } 2078 2079 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2080 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2081 return LowerLabelRef(JTIHi, JTILo, isPIC, DAG); 2082 } 2083 2084 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2085 SelectionDAG &DAG) const { 2086 EVT PtrVT = Op.getValueType(); 2087 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2088 const BlockAddress *BA = BASDN->getBlockAddress(); 2089 2090 // 64-bit SVR4 ABI code is always position-independent. 2091 // The actual BlockAddress is stored in the TOC. 2092 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2093 setUsesTOCBasePtr(DAG); 2094 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2095 return getTOCEntry(DAG, SDLoc(BASDN), true, GA); 2096 } 2097 2098 unsigned MOHiFlag, MOLoFlag; 2099 bool isPIC = 2100 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag); 2101 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2102 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2103 return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG); 2104 } 2105 2106 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2107 SelectionDAG &DAG) const { 2108 2109 // FIXME: TLS addresses currently use medium model code sequences, 2110 // which is the most useful form. Eventually support for small and 2111 // large models could be added if users need it, at the cost of 2112 // additional complexity. 2113 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2114 if (DAG.getTarget().Options.EmulatedTLS) 2115 return LowerToTLSEmulatedModel(GA, DAG); 2116 2117 SDLoc dl(GA); 2118 const GlobalValue *GV = GA->getGlobal(); 2119 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2120 bool is64bit = Subtarget.isPPC64(); 2121 const Module *M = DAG.getMachineFunction().getFunction()->getParent(); 2122 PICLevel::Level picLevel = M->getPICLevel(); 2123 2124 TLSModel::Model Model = getTargetMachine().getTLSModel(GV); 2125 2126 if (Model == TLSModel::LocalExec) { 2127 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2128 PPCII::MO_TPREL_HA); 2129 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2130 PPCII::MO_TPREL_LO); 2131 SDValue TLSReg = DAG.getRegister(is64bit ? PPC::X13 : PPC::R2, 2132 is64bit ? MVT::i64 : MVT::i32); 2133 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2134 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2135 } 2136 2137 if (Model == TLSModel::InitialExec) { 2138 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2139 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2140 PPCII::MO_TLS); 2141 SDValue GOTPtr; 2142 if (is64bit) { 2143 setUsesTOCBasePtr(DAG); 2144 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2145 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2146 PtrVT, GOTReg, TGA); 2147 } else 2148 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2149 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2150 PtrVT, TGA, GOTPtr); 2151 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2152 } 2153 2154 if (Model == TLSModel::GeneralDynamic) { 2155 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2156 SDValue GOTPtr; 2157 if (is64bit) { 2158 setUsesTOCBasePtr(DAG); 2159 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2160 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2161 GOTReg, TGA); 2162 } else { 2163 if (picLevel == PICLevel::Small) 2164 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2165 else 2166 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2167 } 2168 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2169 GOTPtr, TGA, TGA); 2170 } 2171 2172 if (Model == TLSModel::LocalDynamic) { 2173 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2174 SDValue GOTPtr; 2175 if (is64bit) { 2176 setUsesTOCBasePtr(DAG); 2177 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2178 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2179 GOTReg, TGA); 2180 } else { 2181 if (picLevel == PICLevel::Small) 2182 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2183 else 2184 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2185 } 2186 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2187 PtrVT, GOTPtr, TGA, TGA); 2188 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2189 PtrVT, TLSAddr, TGA); 2190 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2191 } 2192 2193 llvm_unreachable("Unknown TLS model!"); 2194 } 2195 2196 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2197 SelectionDAG &DAG) const { 2198 EVT PtrVT = Op.getValueType(); 2199 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 2200 SDLoc DL(GSDN); 2201 const GlobalValue *GV = GSDN->getGlobal(); 2202 2203 // 64-bit SVR4 ABI code is always position-independent. 2204 // The actual address of the GlobalValue is stored in the TOC. 2205 if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) { 2206 setUsesTOCBasePtr(DAG); 2207 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 2208 return getTOCEntry(DAG, DL, true, GA); 2209 } 2210 2211 unsigned MOHiFlag, MOLoFlag; 2212 bool isPIC = 2213 GetLabelAccessInfo(DAG.getTarget(), Subtarget, MOHiFlag, MOLoFlag, GV); 2214 2215 if (isPIC && Subtarget.isSVR4ABI()) { 2216 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 2217 GSDN->getOffset(), 2218 PPCII::MO_PIC_FLAG); 2219 return getTOCEntry(DAG, DL, false, GA); 2220 } 2221 2222 SDValue GAHi = 2223 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 2224 SDValue GALo = 2225 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 2226 2227 SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG); 2228 2229 // If the global reference is actually to a non-lazy-pointer, we have to do an 2230 // extra load to get the address of the global. 2231 if (MOHiFlag & PPCII::MO_NLP_FLAG) 2232 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo(), 2233 false, false, false, 0); 2234 return Ptr; 2235 } 2236 2237 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2238 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2239 SDLoc dl(Op); 2240 2241 if (Op.getValueType() == MVT::v2i64) { 2242 // When the operands themselves are v2i64 values, we need to do something 2243 // special because VSX has no underlying comparison operations for these. 2244 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 2245 // Equality can be handled by casting to the legal type for Altivec 2246 // comparisons, everything else needs to be expanded. 2247 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 2248 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 2249 DAG.getSetCC(dl, MVT::v4i32, 2250 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 2251 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 2252 CC)); 2253 } 2254 2255 return SDValue(); 2256 } 2257 2258 // We handle most of these in the usual way. 2259 return Op; 2260 } 2261 2262 // If we're comparing for equality to zero, expose the fact that this is 2263 // implented as a ctlz/srl pair on ppc, so that the dag combiner can 2264 // fold the new nodes. 2265 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 2266 if (C->isNullValue() && CC == ISD::SETEQ) { 2267 EVT VT = Op.getOperand(0).getValueType(); 2268 SDValue Zext = Op.getOperand(0); 2269 if (VT.bitsLT(MVT::i32)) { 2270 VT = MVT::i32; 2271 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 2272 } 2273 unsigned Log2b = Log2_32(VT.getSizeInBits()); 2274 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 2275 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 2276 DAG.getConstant(Log2b, dl, MVT::i32)); 2277 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 2278 } 2279 // Leave comparisons against 0 and -1 alone for now, since they're usually 2280 // optimized. FIXME: revisit this when we can custom lower all setcc 2281 // optimizations. 2282 if (C->isAllOnesValue() || C->isNullValue()) 2283 return SDValue(); 2284 } 2285 2286 // If we have an integer seteq/setne, turn it into a compare against zero 2287 // by xor'ing the rhs with the lhs, which is faster than setting a 2288 // condition register, reading it back out, and masking the correct bit. The 2289 // normal approach here uses sub to do this instead of xor. Using xor exposes 2290 // the result to other bit-twiddling opportunities. 2291 EVT LHSVT = Op.getOperand(0).getValueType(); 2292 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 2293 EVT VT = Op.getValueType(); 2294 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0), 2295 Op.getOperand(1)); 2296 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC); 2297 } 2298 return SDValue(); 2299 } 2300 2301 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, 2302 const PPCSubtarget &Subtarget) const { 2303 SDNode *Node = Op.getNode(); 2304 EVT VT = Node->getValueType(0); 2305 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 2306 SDValue InChain = Node->getOperand(0); 2307 SDValue VAListPtr = Node->getOperand(1); 2308 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); 2309 SDLoc dl(Node); 2310 2311 assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only"); 2312 2313 // gpr_index 2314 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2315 VAListPtr, MachinePointerInfo(SV), MVT::i8, 2316 false, false, false, 0); 2317 InChain = GprIndex.getValue(1); 2318 2319 if (VT == MVT::i64) { 2320 // Check if GprIndex is even 2321 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex, 2322 DAG.getConstant(1, dl, MVT::i32)); 2323 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd, 2324 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE); 2325 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex, 2326 DAG.getConstant(1, dl, MVT::i32)); 2327 // Align GprIndex to be even if it isn't 2328 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne, 2329 GprIndex); 2330 } 2331 2332 // fpr index is 1 byte after gpr 2333 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2334 DAG.getConstant(1, dl, MVT::i32)); 2335 2336 // fpr 2337 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain, 2338 FprPtr, MachinePointerInfo(SV), MVT::i8, 2339 false, false, false, 0); 2340 InChain = FprIndex.getValue(1); 2341 2342 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2343 DAG.getConstant(8, dl, MVT::i32)); 2344 2345 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr, 2346 DAG.getConstant(4, dl, MVT::i32)); 2347 2348 // areas 2349 SDValue OverflowArea = DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, 2350 MachinePointerInfo(), false, false, 2351 false, 0); 2352 InChain = OverflowArea.getValue(1); 2353 2354 SDValue RegSaveArea = DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, 2355 MachinePointerInfo(), false, false, 2356 false, 0); 2357 InChain = RegSaveArea.getValue(1); 2358 2359 // select overflow_area if index > 8 2360 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex, 2361 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT); 2362 2363 // adjustment constant gpr_index * 4/8 2364 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32, 2365 VT.isInteger() ? GprIndex : FprIndex, 2366 DAG.getConstant(VT.isInteger() ? 4 : 8, dl, 2367 MVT::i32)); 2368 2369 // OurReg = RegSaveArea + RegConstant 2370 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea, 2371 RegConstant); 2372 2373 // Floating types are 32 bytes into RegSaveArea 2374 if (VT.isFloatingPoint()) 2375 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg, 2376 DAG.getConstant(32, dl, MVT::i32)); 2377 2378 // increase {f,g}pr_index by 1 (or 2 if VT is i64) 2379 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32, 2380 VT.isInteger() ? GprIndex : FprIndex, 2381 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl, 2382 MVT::i32)); 2383 2384 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1, 2385 VT.isInteger() ? VAListPtr : FprPtr, 2386 MachinePointerInfo(SV), 2387 MVT::i8, false, false, 0); 2388 2389 // determine if we should load from reg_save_area or overflow_area 2390 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea); 2391 2392 // increase overflow_area by 4/8 if gpr/fpr > 8 2393 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea, 2394 DAG.getConstant(VT.isInteger() ? 4 : 8, 2395 dl, MVT::i32)); 2396 2397 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea, 2398 OverflowAreaPlusN); 2399 2400 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, 2401 OverflowAreaPtr, 2402 MachinePointerInfo(), 2403 MVT::i32, false, false, 0); 2404 2405 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), 2406 false, false, false, 0); 2407 } 2408 2409 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG, 2410 const PPCSubtarget &Subtarget) const { 2411 assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only"); 2412 2413 // We have to copy the entire va_list struct: 2414 // 2*sizeof(char) + 2 Byte alignment + 2*sizeof(char*) = 12 Byte 2415 return DAG.getMemcpy(Op.getOperand(0), Op, 2416 Op.getOperand(1), Op.getOperand(2), 2417 DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true, 2418 false, MachinePointerInfo(), MachinePointerInfo()); 2419 } 2420 2421 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, 2422 SelectionDAG &DAG) const { 2423 return Op.getOperand(0); 2424 } 2425 2426 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, 2427 SelectionDAG &DAG) const { 2428 SDValue Chain = Op.getOperand(0); 2429 SDValue Trmp = Op.getOperand(1); // trampoline 2430 SDValue FPtr = Op.getOperand(2); // nested function 2431 SDValue Nest = Op.getOperand(3); // 'nest' parameter value 2432 SDLoc dl(Op); 2433 2434 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 2435 bool isPPC64 = (PtrVT == MVT::i64); 2436 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext()); 2437 2438 TargetLowering::ArgListTy Args; 2439 TargetLowering::ArgListEntry Entry; 2440 2441 Entry.Ty = IntPtrTy; 2442 Entry.Node = Trmp; Args.push_back(Entry); 2443 2444 // TrampSize == (isPPC64 ? 48 : 40); 2445 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl, 2446 isPPC64 ? MVT::i64 : MVT::i32); 2447 Args.push_back(Entry); 2448 2449 Entry.Node = FPtr; Args.push_back(Entry); 2450 Entry.Node = Nest; Args.push_back(Entry); 2451 2452 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg) 2453 TargetLowering::CallLoweringInfo CLI(DAG); 2454 CLI.setDebugLoc(dl).setChain(Chain) 2455 .setCallee(CallingConv::C, Type::getVoidTy(*DAG.getContext()), 2456 DAG.getExternalSymbol("__trampoline_setup", PtrVT), 2457 std::move(Args), 0); 2458 2459 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2460 return CallResult.second; 2461 } 2462 2463 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, 2464 const PPCSubtarget &Subtarget) const { 2465 MachineFunction &MF = DAG.getMachineFunction(); 2466 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2467 2468 SDLoc dl(Op); 2469 2470 if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) { 2471 // vastart just stores the address of the VarArgsFrameIndex slot into the 2472 // memory location argument. 2473 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2474 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2475 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2476 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2477 MachinePointerInfo(SV), 2478 false, false, 0); 2479 } 2480 2481 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 2482 // We suppose the given va_list is already allocated. 2483 // 2484 // typedef struct { 2485 // char gpr; /* index into the array of 8 GPRs 2486 // * stored in the register save area 2487 // * gpr=0 corresponds to r3, 2488 // * gpr=1 to r4, etc. 2489 // */ 2490 // char fpr; /* index into the array of 8 FPRs 2491 // * stored in the register save area 2492 // * fpr=0 corresponds to f1, 2493 // * fpr=1 to f2, etc. 2494 // */ 2495 // char *overflow_arg_area; 2496 // /* location on stack that holds 2497 // * the next overflow argument 2498 // */ 2499 // char *reg_save_area; 2500 // /* where r3:r10 and f1:f8 (if saved) 2501 // * are stored 2502 // */ 2503 // } va_list[1]; 2504 2505 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 2506 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 2507 2508 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2509 2510 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 2511 PtrVT); 2512 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 2513 PtrVT); 2514 2515 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 2516 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 2517 2518 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 2519 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 2520 2521 uint64_t FPROffset = 1; 2522 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 2523 2524 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2525 2526 // Store first byte : number of int regs 2527 SDValue firstStore = DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, 2528 Op.getOperand(1), 2529 MachinePointerInfo(SV), 2530 MVT::i8, false, false, 0); 2531 uint64_t nextOffset = FPROffset; 2532 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 2533 ConstFPROffset); 2534 2535 // Store second byte : number of float regs 2536 SDValue secondStore = 2537 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 2538 MachinePointerInfo(SV, nextOffset), MVT::i8, 2539 false, false, 0); 2540 nextOffset += StackOffset; 2541 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 2542 2543 // Store second word : arguments given on stack 2544 SDValue thirdStore = 2545 DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 2546 MachinePointerInfo(SV, nextOffset), 2547 false, false, 0); 2548 nextOffset += FrameOffset; 2549 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 2550 2551 // Store third word : arguments given in registers 2552 return DAG.getStore(thirdStore, dl, FR, nextPtr, 2553 MachinePointerInfo(SV, nextOffset), 2554 false, false, 0); 2555 2556 } 2557 2558 #include "PPCGenCallingConv.inc" 2559 2560 // Function whose sole purpose is to kill compiler warnings 2561 // stemming from unused functions included from PPCGenCallingConv.inc. 2562 CCAssignFn *PPCTargetLowering::useFastISelCCs(unsigned Flag) const { 2563 return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS; 2564 } 2565 2566 bool llvm::CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, 2567 CCValAssign::LocInfo &LocInfo, 2568 ISD::ArgFlagsTy &ArgFlags, 2569 CCState &State) { 2570 return true; 2571 } 2572 2573 bool llvm::CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, 2574 MVT &LocVT, 2575 CCValAssign::LocInfo &LocInfo, 2576 ISD::ArgFlagsTy &ArgFlags, 2577 CCState &State) { 2578 static const MCPhysReg ArgRegs[] = { 2579 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2580 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2581 }; 2582 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2583 2584 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2585 2586 // Skip one register if the first unallocated register has an even register 2587 // number and there are still argument registers available which have not been 2588 // allocated yet. RegNum is actually an index into ArgRegs, which means we 2589 // need to skip a register if RegNum is odd. 2590 if (RegNum != NumArgRegs && RegNum % 2 == 1) { 2591 State.AllocateReg(ArgRegs[RegNum]); 2592 } 2593 2594 // Always return false here, as this function only makes sure that the first 2595 // unallocated register has an odd register number and does not actually 2596 // allocate a register for the current argument. 2597 return false; 2598 } 2599 2600 bool llvm::CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, 2601 MVT &LocVT, 2602 CCValAssign::LocInfo &LocInfo, 2603 ISD::ArgFlagsTy &ArgFlags, 2604 CCState &State) { 2605 static const MCPhysReg ArgRegs[] = { 2606 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2607 PPC::F8 2608 }; 2609 2610 const unsigned NumArgRegs = array_lengthof(ArgRegs); 2611 2612 unsigned RegNum = State.getFirstUnallocated(ArgRegs); 2613 2614 // If there is only one Floating-point register left we need to put both f64 2615 // values of a split ppc_fp128 value on the stack. 2616 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { 2617 State.AllocateReg(ArgRegs[RegNum]); 2618 } 2619 2620 // Always return false here, as this function only makes sure that the two f64 2621 // values a ppc_fp128 value is split into are both passed in registers or both 2622 // passed on the stack and does not actually allocate a register for the 2623 // current argument. 2624 return false; 2625 } 2626 2627 /// FPR - The set of FP registers that should be allocated for arguments, 2628 /// on Darwin. 2629 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 2630 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 2631 PPC::F11, PPC::F12, PPC::F13}; 2632 2633 /// QFPR - The set of QPX registers that should be allocated for arguments. 2634 static const MCPhysReg QFPR[] = { 2635 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 2636 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 2637 2638 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 2639 /// the stack. 2640 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 2641 unsigned PtrByteSize) { 2642 unsigned ArgSize = ArgVT.getStoreSize(); 2643 if (Flags.isByVal()) 2644 ArgSize = Flags.getByValSize(); 2645 2646 // Round up to multiples of the pointer size, except for array members, 2647 // which are always packed. 2648 if (!Flags.isInConsecutiveRegs()) 2649 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2650 2651 return ArgSize; 2652 } 2653 2654 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 2655 /// on the stack. 2656 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 2657 ISD::ArgFlagsTy Flags, 2658 unsigned PtrByteSize) { 2659 unsigned Align = PtrByteSize; 2660 2661 // Altivec parameters are padded to a 16 byte boundary. 2662 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2663 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2664 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2665 ArgVT == MVT::v1i128) 2666 Align = 16; 2667 // QPX vector types stored in double-precision are padded to a 32 byte 2668 // boundary. 2669 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 2670 Align = 32; 2671 2672 // ByVal parameters are aligned as requested. 2673 if (Flags.isByVal()) { 2674 unsigned BVAlign = Flags.getByValAlign(); 2675 if (BVAlign > PtrByteSize) { 2676 if (BVAlign % PtrByteSize != 0) 2677 llvm_unreachable( 2678 "ByVal alignment is not a multiple of the pointer size"); 2679 2680 Align = BVAlign; 2681 } 2682 } 2683 2684 // Array members are always packed to their original alignment. 2685 if (Flags.isInConsecutiveRegs()) { 2686 // If the array member was split into multiple registers, the first 2687 // needs to be aligned to the size of the full type. (Except for 2688 // ppcf128, which is only aligned as its f64 components.) 2689 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 2690 Align = OrigVT.getStoreSize(); 2691 else 2692 Align = ArgVT.getStoreSize(); 2693 } 2694 2695 return Align; 2696 } 2697 2698 /// CalculateStackSlotUsed - Return whether this argument will use its 2699 /// stack slot (instead of being passed in registers). ArgOffset, 2700 /// AvailableFPRs, and AvailableVRs must hold the current argument 2701 /// position, and will be updated to account for this argument. 2702 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 2703 ISD::ArgFlagsTy Flags, 2704 unsigned PtrByteSize, 2705 unsigned LinkageSize, 2706 unsigned ParamAreaSize, 2707 unsigned &ArgOffset, 2708 unsigned &AvailableFPRs, 2709 unsigned &AvailableVRs, bool HasQPX) { 2710 bool UseMemory = false; 2711 2712 // Respect alignment of argument on the stack. 2713 unsigned Align = 2714 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 2715 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 2716 // If there's no space left in the argument save area, we must 2717 // use memory (this check also catches zero-sized arguments). 2718 if (ArgOffset >= LinkageSize + ParamAreaSize) 2719 UseMemory = true; 2720 2721 // Allocate argument on the stack. 2722 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 2723 if (Flags.isInConsecutiveRegsLast()) 2724 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 2725 // If we overran the argument save area, we must use memory 2726 // (this check catches arguments passed partially in memory) 2727 if (ArgOffset > LinkageSize + ParamAreaSize) 2728 UseMemory = true; 2729 2730 // However, if the argument is actually passed in an FPR or a VR, 2731 // we don't use memory after all. 2732 if (!Flags.isByVal()) { 2733 if (ArgVT == MVT::f32 || ArgVT == MVT::f64 || 2734 // QPX registers overlap with the scalar FP registers. 2735 (HasQPX && (ArgVT == MVT::v4f32 || 2736 ArgVT == MVT::v4f64 || 2737 ArgVT == MVT::v4i1))) 2738 if (AvailableFPRs > 0) { 2739 --AvailableFPRs; 2740 return false; 2741 } 2742 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 2743 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 2744 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 2745 ArgVT == MVT::v1i128) 2746 if (AvailableVRs > 0) { 2747 --AvailableVRs; 2748 return false; 2749 } 2750 } 2751 2752 return UseMemory; 2753 } 2754 2755 /// EnsureStackAlignment - Round stack frame size up from NumBytes to 2756 /// ensure minimum alignment required for target. 2757 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, 2758 unsigned NumBytes) { 2759 unsigned TargetAlign = Lowering->getStackAlignment(); 2760 unsigned AlignMask = TargetAlign - 1; 2761 NumBytes = (NumBytes + AlignMask) & ~AlignMask; 2762 return NumBytes; 2763 } 2764 2765 SDValue 2766 PPCTargetLowering::LowerFormalArguments(SDValue Chain, 2767 CallingConv::ID CallConv, bool isVarArg, 2768 const SmallVectorImpl<ISD::InputArg> 2769 &Ins, 2770 SDLoc dl, SelectionDAG &DAG, 2771 SmallVectorImpl<SDValue> &InVals) 2772 const { 2773 if (Subtarget.isSVR4ABI()) { 2774 if (Subtarget.isPPC64()) 2775 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, 2776 dl, DAG, InVals); 2777 else 2778 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, 2779 dl, DAG, InVals); 2780 } else { 2781 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, 2782 dl, DAG, InVals); 2783 } 2784 } 2785 2786 SDValue 2787 PPCTargetLowering::LowerFormalArguments_32SVR4( 2788 SDValue Chain, 2789 CallingConv::ID CallConv, bool isVarArg, 2790 const SmallVectorImpl<ISD::InputArg> 2791 &Ins, 2792 SDLoc dl, SelectionDAG &DAG, 2793 SmallVectorImpl<SDValue> &InVals) const { 2794 2795 // 32-bit SVR4 ABI Stack Frame Layout: 2796 // +-----------------------------------+ 2797 // +--> | Back chain | 2798 // | +-----------------------------------+ 2799 // | | Floating-point register save area | 2800 // | +-----------------------------------+ 2801 // | | General register save area | 2802 // | +-----------------------------------+ 2803 // | | CR save word | 2804 // | +-----------------------------------+ 2805 // | | VRSAVE save word | 2806 // | +-----------------------------------+ 2807 // | | Alignment padding | 2808 // | +-----------------------------------+ 2809 // | | Vector register save area | 2810 // | +-----------------------------------+ 2811 // | | Local variable space | 2812 // | +-----------------------------------+ 2813 // | | Parameter list area | 2814 // | +-----------------------------------+ 2815 // | | LR save word | 2816 // | +-----------------------------------+ 2817 // SP--> +--- | Back chain | 2818 // +-----------------------------------+ 2819 // 2820 // Specifications: 2821 // System V Application Binary Interface PowerPC Processor Supplement 2822 // AltiVec Technology Programming Interface Manual 2823 2824 MachineFunction &MF = DAG.getMachineFunction(); 2825 MachineFrameInfo *MFI = MF.getFrameInfo(); 2826 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 2827 2828 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 2829 // Potential tail calls could cause overwriting of argument stack slots. 2830 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 2831 (CallConv == CallingConv::Fast)); 2832 unsigned PtrByteSize = 4; 2833 2834 // Assign locations to all of the incoming arguments. 2835 SmallVector<CCValAssign, 16> ArgLocs; 2836 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 2837 *DAG.getContext()); 2838 2839 // Reserve space for the linkage area on the stack. 2840 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 2841 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 2842 2843 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 2844 2845 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2846 CCValAssign &VA = ArgLocs[i]; 2847 2848 // Arguments stored in registers. 2849 if (VA.isRegLoc()) { 2850 const TargetRegisterClass *RC; 2851 EVT ValVT = VA.getValVT(); 2852 2853 switch (ValVT.getSimpleVT().SimpleTy) { 2854 default: 2855 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 2856 case MVT::i1: 2857 case MVT::i32: 2858 RC = &PPC::GPRCRegClass; 2859 break; 2860 case MVT::f32: 2861 if (Subtarget.hasP8Vector()) 2862 RC = &PPC::VSSRCRegClass; 2863 else 2864 RC = &PPC::F4RCRegClass; 2865 break; 2866 case MVT::f64: 2867 if (Subtarget.hasVSX()) 2868 RC = &PPC::VSFRCRegClass; 2869 else 2870 RC = &PPC::F8RCRegClass; 2871 break; 2872 case MVT::v16i8: 2873 case MVT::v8i16: 2874 case MVT::v4i32: 2875 RC = &PPC::VRRCRegClass; 2876 break; 2877 case MVT::v4f32: 2878 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 2879 break; 2880 case MVT::v2f64: 2881 case MVT::v2i64: 2882 RC = &PPC::VSHRCRegClass; 2883 break; 2884 case MVT::v4f64: 2885 RC = &PPC::QFRCRegClass; 2886 break; 2887 case MVT::v4i1: 2888 RC = &PPC::QBRCRegClass; 2889 break; 2890 } 2891 2892 // Transform the arguments stored in physical registers into virtual ones. 2893 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2894 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 2895 ValVT == MVT::i1 ? MVT::i32 : ValVT); 2896 2897 if (ValVT == MVT::i1) 2898 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 2899 2900 InVals.push_back(ArgValue); 2901 } else { 2902 // Argument stored in memory. 2903 assert(VA.isMemLoc()); 2904 2905 unsigned ArgSize = VA.getLocVT().getStoreSize(); 2906 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset(), 2907 isImmutable); 2908 2909 // Create load nodes to retrieve arguments from the stack. 2910 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 2911 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2912 MachinePointerInfo(), 2913 false, false, false, 0)); 2914 } 2915 } 2916 2917 // Assign locations to all of the incoming aggregate by value arguments. 2918 // Aggregates passed by value are stored in the local variable space of the 2919 // caller's stack frame, right above the parameter list area. 2920 SmallVector<CCValAssign, 16> ByValArgLocs; 2921 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2922 ByValArgLocs, *DAG.getContext()); 2923 2924 // Reserve stack space for the allocations in CCInfo. 2925 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 2926 2927 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 2928 2929 // Area that is at least reserved in the caller of this function. 2930 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 2931 MinReservedArea = std::max(MinReservedArea, LinkageSize); 2932 2933 // Set the size that is at least reserved in caller of this function. Tail 2934 // call optimized function's reserved stack space needs to be aligned so that 2935 // taking the difference between two stack areas will result in an aligned 2936 // stack. 2937 MinReservedArea = 2938 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 2939 FuncInfo->setMinReservedArea(MinReservedArea); 2940 2941 SmallVector<SDValue, 8> MemOps; 2942 2943 // If the function takes variable number of arguments, make a frame index for 2944 // the start of the first vararg value... for expansion of llvm.va_start. 2945 if (isVarArg) { 2946 static const MCPhysReg GPArgRegs[] = { 2947 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 2948 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 2949 }; 2950 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 2951 2952 static const MCPhysReg FPArgRegs[] = { 2953 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 2954 PPC::F8 2955 }; 2956 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 2957 2958 if (Subtarget.useSoftFloat()) 2959 NumFPArgRegs = 0; 2960 2961 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 2962 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 2963 2964 // Make room for NumGPArgRegs and NumFPArgRegs. 2965 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 2966 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 2967 2968 FuncInfo->setVarArgsStackOffset( 2969 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 2970 CCInfo.getNextStackOffset(), true)); 2971 2972 FuncInfo->setVarArgsFrameIndex(MFI->CreateStackObject(Depth, 8, false)); 2973 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2974 2975 // The fixed integer arguments of a variadic function are stored to the 2976 // VarArgsFrameIndex on the stack so that they may be loaded by deferencing 2977 // the result of va_next. 2978 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 2979 // Get an existing live-in vreg, or add a new one. 2980 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 2981 if (!VReg) 2982 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 2983 2984 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 2985 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 2986 MachinePointerInfo(), false, false, 0); 2987 MemOps.push_back(Store); 2988 // Increment the address by four for the next argument to store 2989 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 2990 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 2991 } 2992 2993 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 2994 // is set. 2995 // The double arguments are stored to the VarArgsFrameIndex 2996 // on the stack. 2997 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 2998 // Get an existing live-in vreg, or add a new one. 2999 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3000 if (!VReg) 3001 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3002 3003 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3004 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3005 MachinePointerInfo(), false, false, 0); 3006 MemOps.push_back(Store); 3007 // Increment the address by eight for the next argument to store 3008 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3009 PtrVT); 3010 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3011 } 3012 } 3013 3014 if (!MemOps.empty()) 3015 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3016 3017 return Chain; 3018 } 3019 3020 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3021 // value to MVT::i64 and then truncate to the correct register size. 3022 SDValue 3023 PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, 3024 SelectionDAG &DAG, SDValue ArgVal, 3025 SDLoc dl) const { 3026 if (Flags.isSExt()) 3027 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3028 DAG.getValueType(ObjectVT)); 3029 else if (Flags.isZExt()) 3030 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3031 DAG.getValueType(ObjectVT)); 3032 3033 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3034 } 3035 3036 SDValue 3037 PPCTargetLowering::LowerFormalArguments_64SVR4( 3038 SDValue Chain, 3039 CallingConv::ID CallConv, bool isVarArg, 3040 const SmallVectorImpl<ISD::InputArg> 3041 &Ins, 3042 SDLoc dl, SelectionDAG &DAG, 3043 SmallVectorImpl<SDValue> &InVals) const { 3044 // TODO: add description of PPC stack frame format, or at least some docs. 3045 // 3046 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3047 bool isLittleEndian = Subtarget.isLittleEndian(); 3048 MachineFunction &MF = DAG.getMachineFunction(); 3049 MachineFrameInfo *MFI = MF.getFrameInfo(); 3050 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3051 3052 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3053 "fastcc not supported on varargs functions"); 3054 3055 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 3056 // Potential tail calls could cause overwriting of argument stack slots. 3057 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3058 (CallConv == CallingConv::Fast)); 3059 unsigned PtrByteSize = 8; 3060 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3061 3062 static const MCPhysReg GPR[] = { 3063 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3064 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3065 }; 3066 static const MCPhysReg VR[] = { 3067 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3068 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3069 }; 3070 static const MCPhysReg VSRH[] = { 3071 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 3072 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 3073 }; 3074 3075 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3076 const unsigned Num_FPR_Regs = 13; 3077 const unsigned Num_VR_Regs = array_lengthof(VR); 3078 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3079 3080 // Do a first pass over the arguments to determine whether the ABI 3081 // guarantees that our caller has allocated the parameter save area 3082 // on its stack frame. In the ELFv1 ABI, this is always the case; 3083 // in the ELFv2 ABI, it is true if this is a vararg function or if 3084 // any parameter is located in a stack slot. 3085 3086 bool HasParameterArea = !isELFv2ABI || isVarArg; 3087 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3088 unsigned NumBytes = LinkageSize; 3089 unsigned AvailableFPRs = Num_FPR_Regs; 3090 unsigned AvailableVRs = Num_VR_Regs; 3091 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3092 if (Ins[i].Flags.isNest()) 3093 continue; 3094 3095 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3096 PtrByteSize, LinkageSize, ParamAreaSize, 3097 NumBytes, AvailableFPRs, AvailableVRs, 3098 Subtarget.hasQPX())) 3099 HasParameterArea = true; 3100 } 3101 3102 // Add DAG nodes to load the arguments or copy them out of registers. On 3103 // entry to a function on PPC, the arguments start after the linkage area, 3104 // although the first ones are often in registers. 3105 3106 unsigned ArgOffset = LinkageSize; 3107 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3108 unsigned &QFPR_idx = FPR_idx; 3109 SmallVector<SDValue, 8> MemOps; 3110 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3111 unsigned CurArgIdx = 0; 3112 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3113 SDValue ArgVal; 3114 bool needsLoad = false; 3115 EVT ObjectVT = Ins[ArgNo].VT; 3116 EVT OrigVT = Ins[ArgNo].ArgVT; 3117 unsigned ObjSize = ObjectVT.getStoreSize(); 3118 unsigned ArgSize = ObjSize; 3119 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3120 if (Ins[ArgNo].isOrigArg()) { 3121 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3122 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3123 } 3124 // We re-align the argument offset for each argument, except when using the 3125 // fast calling convention, when we need to make sure we do that only when 3126 // we'll actually use a stack slot. 3127 unsigned CurArgOffset, Align; 3128 auto ComputeArgOffset = [&]() { 3129 /* Respect alignment of argument on the stack. */ 3130 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3131 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3132 CurArgOffset = ArgOffset; 3133 }; 3134 3135 if (CallConv != CallingConv::Fast) { 3136 ComputeArgOffset(); 3137 3138 /* Compute GPR index associated with argument offset. */ 3139 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3140 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3141 } 3142 3143 // FIXME the codegen can be much improved in some cases. 3144 // We do not have to keep everything in memory. 3145 if (Flags.isByVal()) { 3146 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3147 3148 if (CallConv == CallingConv::Fast) 3149 ComputeArgOffset(); 3150 3151 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3152 ObjSize = Flags.getByValSize(); 3153 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3154 // Empty aggregate parameters do not take up registers. Examples: 3155 // struct { } a; 3156 // union { } b; 3157 // int c[0]; 3158 // etc. However, we have to provide a place-holder in InVals, so 3159 // pretend we have an 8-byte item at the current address for that 3160 // purpose. 3161 if (!ObjSize) { 3162 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 3163 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3164 InVals.push_back(FIN); 3165 continue; 3166 } 3167 3168 // Create a stack object covering all stack doublewords occupied 3169 // by the argument. If the argument is (fully or partially) on 3170 // the stack, or if the argument is fully in registers but the 3171 // caller has allocated the parameter save anyway, we can refer 3172 // directly to the caller's stack frame. Otherwise, create a 3173 // local copy in our own frame. 3174 int FI; 3175 if (HasParameterArea || 3176 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3177 FI = MFI->CreateFixedObject(ArgSize, ArgOffset, false, true); 3178 else 3179 FI = MFI->CreateStackObject(ArgSize, Align, false); 3180 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3181 3182 // Handle aggregates smaller than 8 bytes. 3183 if (ObjSize < PtrByteSize) { 3184 // The value of the object is its address, which differs from the 3185 // address of the enclosing doubleword on big-endian systems. 3186 SDValue Arg = FIN; 3187 if (!isLittleEndian) { 3188 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3189 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3190 } 3191 InVals.push_back(Arg); 3192 3193 if (GPR_idx != Num_GPR_Regs) { 3194 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3195 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3196 SDValue Store; 3197 3198 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3199 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3200 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3201 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3202 MachinePointerInfo(&*FuncArg), ObjType, 3203 false, false, 0); 3204 } else { 3205 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3206 // store the whole register as-is to the parameter save area 3207 // slot. 3208 Store = 3209 DAG.getStore(Val.getValue(1), dl, Val, FIN, 3210 MachinePointerInfo(&*FuncArg), false, false, 0); 3211 } 3212 3213 MemOps.push_back(Store); 3214 } 3215 // Whether we copied from a register or not, advance the offset 3216 // into the parameter save area by a full doubleword. 3217 ArgOffset += PtrByteSize; 3218 continue; 3219 } 3220 3221 // The value of the object is its address, which is the address of 3222 // its first stack doubleword. 3223 InVals.push_back(FIN); 3224 3225 // Store whatever pieces of the object are in registers to memory. 3226 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3227 if (GPR_idx == Num_GPR_Regs) 3228 break; 3229 3230 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3231 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3232 SDValue Addr = FIN; 3233 if (j) { 3234 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3235 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3236 } 3237 SDValue Store = 3238 DAG.getStore(Val.getValue(1), dl, Val, Addr, 3239 MachinePointerInfo(&*FuncArg, j), false, false, 0); 3240 MemOps.push_back(Store); 3241 ++GPR_idx; 3242 } 3243 ArgOffset += ArgSize; 3244 continue; 3245 } 3246 3247 switch (ObjectVT.getSimpleVT().SimpleTy) { 3248 default: llvm_unreachable("Unhandled argument type!"); 3249 case MVT::i1: 3250 case MVT::i32: 3251 case MVT::i64: 3252 if (Flags.isNest()) { 3253 // The 'nest' parameter, if any, is passed in R11. 3254 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3255 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3256 3257 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3258 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3259 3260 break; 3261 } 3262 3263 // These can be scalar arguments or elements of an integer array type 3264 // passed directly. Clang may use those instead of "byval" aggregate 3265 // types to avoid forcing arguments to memory unnecessarily. 3266 if (GPR_idx != Num_GPR_Regs) { 3267 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3268 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3269 3270 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3271 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3272 // value to MVT::i64 and then truncate to the correct register size. 3273 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3274 } else { 3275 if (CallConv == CallingConv::Fast) 3276 ComputeArgOffset(); 3277 3278 needsLoad = true; 3279 ArgSize = PtrByteSize; 3280 } 3281 if (CallConv != CallingConv::Fast || needsLoad) 3282 ArgOffset += 8; 3283 break; 3284 3285 case MVT::f32: 3286 case MVT::f64: 3287 // These can be scalar arguments or elements of a float array type 3288 // passed directly. The latter are used to implement ELFv2 homogenous 3289 // float aggregates. 3290 if (FPR_idx != Num_FPR_Regs) { 3291 unsigned VReg; 3292 3293 if (ObjectVT == MVT::f32) 3294 VReg = MF.addLiveIn(FPR[FPR_idx], 3295 Subtarget.hasP8Vector() 3296 ? &PPC::VSSRCRegClass 3297 : &PPC::F4RCRegClass); 3298 else 3299 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 3300 ? &PPC::VSFRCRegClass 3301 : &PPC::F8RCRegClass); 3302 3303 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3304 ++FPR_idx; 3305 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 3306 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 3307 // once we support fp <-> gpr moves. 3308 3309 // This can only ever happen in the presence of f32 array types, 3310 // since otherwise we never run out of FPRs before running out 3311 // of GPRs. 3312 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3313 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3314 3315 if (ObjectVT == MVT::f32) { 3316 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) 3317 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 3318 DAG.getConstant(32, dl, MVT::i32)); 3319 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 3320 } 3321 3322 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 3323 } else { 3324 if (CallConv == CallingConv::Fast) 3325 ComputeArgOffset(); 3326 3327 needsLoad = true; 3328 } 3329 3330 // When passing an array of floats, the array occupies consecutive 3331 // space in the argument area; only round up to the next doubleword 3332 // at the end of the array. Otherwise, each float takes 8 bytes. 3333 if (CallConv != CallingConv::Fast || needsLoad) { 3334 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 3335 ArgOffset += ArgSize; 3336 if (Flags.isInConsecutiveRegsLast()) 3337 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3338 } 3339 break; 3340 case MVT::v4f32: 3341 case MVT::v4i32: 3342 case MVT::v8i16: 3343 case MVT::v16i8: 3344 case MVT::v2f64: 3345 case MVT::v2i64: 3346 case MVT::v1i128: 3347 if (!Subtarget.hasQPX()) { 3348 // These can be scalar arguments or elements of a vector array type 3349 // passed directly. The latter are used to implement ELFv2 homogenous 3350 // vector aggregates. 3351 if (VR_idx != Num_VR_Regs) { 3352 unsigned VReg = (ObjectVT == MVT::v2f64 || ObjectVT == MVT::v2i64) ? 3353 MF.addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) : 3354 MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3355 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3356 ++VR_idx; 3357 } else { 3358 if (CallConv == CallingConv::Fast) 3359 ComputeArgOffset(); 3360 3361 needsLoad = true; 3362 } 3363 if (CallConv != CallingConv::Fast || needsLoad) 3364 ArgOffset += 16; 3365 break; 3366 } // not QPX 3367 3368 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 3369 "Invalid QPX parameter type"); 3370 /* fall through */ 3371 3372 case MVT::v4f64: 3373 case MVT::v4i1: 3374 // QPX vectors are treated like their scalar floating-point subregisters 3375 // (except that they're larger). 3376 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32; 3377 if (QFPR_idx != Num_QFPR_Regs) { 3378 const TargetRegisterClass *RC; 3379 switch (ObjectVT.getSimpleVT().SimpleTy) { 3380 case MVT::v4f64: RC = &PPC::QFRCRegClass; break; 3381 case MVT::v4f32: RC = &PPC::QSRCRegClass; break; 3382 default: RC = &PPC::QBRCRegClass; break; 3383 } 3384 3385 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC); 3386 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3387 ++QFPR_idx; 3388 } else { 3389 if (CallConv == CallingConv::Fast) 3390 ComputeArgOffset(); 3391 needsLoad = true; 3392 } 3393 if (CallConv != CallingConv::Fast || needsLoad) 3394 ArgOffset += Sz; 3395 break; 3396 } 3397 3398 // We need to load the argument to a virtual register if we determined 3399 // above that we ran out of physical registers of the appropriate type. 3400 if (needsLoad) { 3401 if (ObjSize < ArgSize && !isLittleEndian) 3402 CurArgOffset += ArgSize - ObjSize; 3403 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 3404 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3405 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 3406 false, false, false, 0); 3407 } 3408 3409 InVals.push_back(ArgVal); 3410 } 3411 3412 // Area that is at least reserved in the caller of this function. 3413 unsigned MinReservedArea; 3414 if (HasParameterArea) 3415 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 3416 else 3417 MinReservedArea = LinkageSize; 3418 3419 // Set the size that is at least reserved in caller of this function. Tail 3420 // call optimized functions' reserved stack space needs to be aligned so that 3421 // taking the difference between two stack areas will result in an aligned 3422 // stack. 3423 MinReservedArea = 3424 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3425 FuncInfo->setMinReservedArea(MinReservedArea); 3426 3427 // If the function takes variable number of arguments, make a frame index for 3428 // the start of the first vararg value... for expansion of llvm.va_start. 3429 if (isVarArg) { 3430 int Depth = ArgOffset; 3431 3432 FuncInfo->setVarArgsFrameIndex( 3433 MFI->CreateFixedObject(PtrByteSize, Depth, true)); 3434 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3435 3436 // If this function is vararg, store any remaining integer argument regs 3437 // to their spots on the stack so that they may be loaded by deferencing the 3438 // result of va_next. 3439 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3440 GPR_idx < Num_GPR_Regs; ++GPR_idx) { 3441 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3442 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3443 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3444 MachinePointerInfo(), false, false, 0); 3445 MemOps.push_back(Store); 3446 // Increment the address by four for the next argument to store 3447 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 3448 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3449 } 3450 } 3451 3452 if (!MemOps.empty()) 3453 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3454 3455 return Chain; 3456 } 3457 3458 SDValue 3459 PPCTargetLowering::LowerFormalArguments_Darwin( 3460 SDValue Chain, 3461 CallingConv::ID CallConv, bool isVarArg, 3462 const SmallVectorImpl<ISD::InputArg> 3463 &Ins, 3464 SDLoc dl, SelectionDAG &DAG, 3465 SmallVectorImpl<SDValue> &InVals) const { 3466 // TODO: add description of PPC stack frame format, or at least some docs. 3467 // 3468 MachineFunction &MF = DAG.getMachineFunction(); 3469 MachineFrameInfo *MFI = MF.getFrameInfo(); 3470 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3471 3472 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 3473 bool isPPC64 = PtrVT == MVT::i64; 3474 // Potential tail calls could cause overwriting of argument stack slots. 3475 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3476 (CallConv == CallingConv::Fast)); 3477 unsigned PtrByteSize = isPPC64 ? 8 : 4; 3478 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3479 unsigned ArgOffset = LinkageSize; 3480 // Area that is at least reserved in caller of this function. 3481 unsigned MinReservedArea = ArgOffset; 3482 3483 static const MCPhysReg GPR_32[] = { // 32-bit registers. 3484 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3485 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3486 }; 3487 static const MCPhysReg GPR_64[] = { // 64-bit registers. 3488 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3489 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3490 }; 3491 static const MCPhysReg VR[] = { 3492 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3493 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3494 }; 3495 3496 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 3497 const unsigned Num_FPR_Regs = 13; 3498 const unsigned Num_VR_Regs = array_lengthof( VR); 3499 3500 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3501 3502 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 3503 3504 // In 32-bit non-varargs functions, the stack space for vectors is after the 3505 // stack space for non-vectors. We do not use this space unless we have 3506 // too many vectors to fit in registers, something that only occurs in 3507 // constructed examples:), but we have to walk the arglist to figure 3508 // that out...for the pathological case, compute VecArgOffset as the 3509 // start of the vector parameter area. Computing VecArgOffset is the 3510 // entire point of the following loop. 3511 unsigned VecArgOffset = ArgOffset; 3512 if (!isVarArg && !isPPC64) { 3513 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; 3514 ++ArgNo) { 3515 EVT ObjectVT = Ins[ArgNo].VT; 3516 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3517 3518 if (Flags.isByVal()) { 3519 // ObjSize is the true size, ArgSize rounded up to multiple of regs. 3520 unsigned ObjSize = Flags.getByValSize(); 3521 unsigned ArgSize = 3522 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3523 VecArgOffset += ArgSize; 3524 continue; 3525 } 3526 3527 switch(ObjectVT.getSimpleVT().SimpleTy) { 3528 default: llvm_unreachable("Unhandled argument type!"); 3529 case MVT::i1: 3530 case MVT::i32: 3531 case MVT::f32: 3532 VecArgOffset += 4; 3533 break; 3534 case MVT::i64: // PPC64 3535 case MVT::f64: 3536 // FIXME: We are guaranteed to be !isPPC64 at this point. 3537 // Does MVT::i64 apply? 3538 VecArgOffset += 8; 3539 break; 3540 case MVT::v4f32: 3541 case MVT::v4i32: 3542 case MVT::v8i16: 3543 case MVT::v16i8: 3544 // Nothing to do, we're only looking at Nonvector args here. 3545 break; 3546 } 3547 } 3548 } 3549 // We've found where the vector parameter area in memory is. Skip the 3550 // first 12 parameters; these don't use that memory. 3551 VecArgOffset = ((VecArgOffset+15)/16)*16; 3552 VecArgOffset += 12*16; 3553 3554 // Add DAG nodes to load the arguments or copy them out of registers. On 3555 // entry to a function on PPC, the arguments start after the linkage area, 3556 // although the first ones are often in registers. 3557 3558 SmallVector<SDValue, 8> MemOps; 3559 unsigned nAltivecParamsAtEnd = 0; 3560 Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); 3561 unsigned CurArgIdx = 0; 3562 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3563 SDValue ArgVal; 3564 bool needsLoad = false; 3565 EVT ObjectVT = Ins[ArgNo].VT; 3566 unsigned ObjSize = ObjectVT.getSizeInBits()/8; 3567 unsigned ArgSize = ObjSize; 3568 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3569 if (Ins[ArgNo].isOrigArg()) { 3570 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3571 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3572 } 3573 unsigned CurArgOffset = ArgOffset; 3574 3575 // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. 3576 if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || 3577 ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { 3578 if (isVarArg || isPPC64) { 3579 MinReservedArea = ((MinReservedArea+15)/16)*16; 3580 MinReservedArea += CalculateStackSlotSize(ObjectVT, 3581 Flags, 3582 PtrByteSize); 3583 } else nAltivecParamsAtEnd++; 3584 } else 3585 // Calculate min reserved area. 3586 MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, 3587 Flags, 3588 PtrByteSize); 3589 3590 // FIXME the codegen can be much improved in some cases. 3591 // We do not have to keep everything in memory. 3592 if (Flags.isByVal()) { 3593 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3594 3595 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3596 ObjSize = Flags.getByValSize(); 3597 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3598 // Objects of size 1 and 2 are right justified, everything else is 3599 // left justified. This means the memory address is adjusted forwards. 3600 if (ObjSize==1 || ObjSize==2) { 3601 CurArgOffset = CurArgOffset + (4 - ObjSize); 3602 } 3603 // The value of the object is its address. 3604 int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, false, true); 3605 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3606 InVals.push_back(FIN); 3607 if (ObjSize==1 || ObjSize==2) { 3608 if (GPR_idx != Num_GPR_Regs) { 3609 unsigned VReg; 3610 if (isPPC64) 3611 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3612 else 3613 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3614 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3615 EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; 3616 SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, 3617 MachinePointerInfo(&*FuncArg), 3618 ObjType, false, false, 0); 3619 MemOps.push_back(Store); 3620 ++GPR_idx; 3621 } 3622 3623 ArgOffset += PtrByteSize; 3624 3625 continue; 3626 } 3627 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3628 // Store whatever pieces of the object are in registers 3629 // to memory. ArgOffset will be the address of the beginning 3630 // of the object. 3631 if (GPR_idx != Num_GPR_Regs) { 3632 unsigned VReg; 3633 if (isPPC64) 3634 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3635 else 3636 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3637 int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); 3638 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3639 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3640 SDValue Store = 3641 DAG.getStore(Val.getValue(1), dl, Val, FIN, 3642 MachinePointerInfo(&*FuncArg, j), false, false, 0); 3643 MemOps.push_back(Store); 3644 ++GPR_idx; 3645 ArgOffset += PtrByteSize; 3646 } else { 3647 ArgOffset += ArgSize - (ArgOffset-CurArgOffset); 3648 break; 3649 } 3650 } 3651 continue; 3652 } 3653 3654 switch (ObjectVT.getSimpleVT().SimpleTy) { 3655 default: llvm_unreachable("Unhandled argument type!"); 3656 case MVT::i1: 3657 case MVT::i32: 3658 if (!isPPC64) { 3659 if (GPR_idx != Num_GPR_Regs) { 3660 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3661 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); 3662 3663 if (ObjectVT == MVT::i1) 3664 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal); 3665 3666 ++GPR_idx; 3667 } else { 3668 needsLoad = true; 3669 ArgSize = PtrByteSize; 3670 } 3671 // All int arguments reserve stack space in the Darwin ABI. 3672 ArgOffset += PtrByteSize; 3673 break; 3674 } 3675 // FALLTHROUGH 3676 case MVT::i64: // PPC64 3677 if (GPR_idx != Num_GPR_Regs) { 3678 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3679 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3680 3681 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3682 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3683 // value to MVT::i64 and then truncate to the correct register size. 3684 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3685 3686 ++GPR_idx; 3687 } else { 3688 needsLoad = true; 3689 ArgSize = PtrByteSize; 3690 } 3691 // All int arguments reserve stack space in the Darwin ABI. 3692 ArgOffset += 8; 3693 break; 3694 3695 case MVT::f32: 3696 case MVT::f64: 3697 // Every 4 bytes of argument space consumes one of the GPRs available for 3698 // argument passing. 3699 if (GPR_idx != Num_GPR_Regs) { 3700 ++GPR_idx; 3701 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64) 3702 ++GPR_idx; 3703 } 3704 if (FPR_idx != Num_FPR_Regs) { 3705 unsigned VReg; 3706 3707 if (ObjectVT == MVT::f32) 3708 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); 3709 else 3710 VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); 3711 3712 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3713 ++FPR_idx; 3714 } else { 3715 needsLoad = true; 3716 } 3717 3718 // All FP arguments reserve stack space in the Darwin ABI. 3719 ArgOffset += isPPC64 ? 8 : ObjSize; 3720 break; 3721 case MVT::v4f32: 3722 case MVT::v4i32: 3723 case MVT::v8i16: 3724 case MVT::v16i8: 3725 // Note that vector arguments in registers don't reserve stack space, 3726 // except in varargs functions. 3727 if (VR_idx != Num_VR_Regs) { 3728 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 3729 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 3730 if (isVarArg) { 3731 while ((ArgOffset % 16) != 0) { 3732 ArgOffset += PtrByteSize; 3733 if (GPR_idx != Num_GPR_Regs) 3734 GPR_idx++; 3735 } 3736 ArgOffset += 16; 3737 GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? 3738 } 3739 ++VR_idx; 3740 } else { 3741 if (!isVarArg && !isPPC64) { 3742 // Vectors go after all the nonvectors. 3743 CurArgOffset = VecArgOffset; 3744 VecArgOffset += 16; 3745 } else { 3746 // Vectors are aligned. 3747 ArgOffset = ((ArgOffset+15)/16)*16; 3748 CurArgOffset = ArgOffset; 3749 ArgOffset += 16; 3750 } 3751 needsLoad = true; 3752 } 3753 break; 3754 } 3755 3756 // We need to load the argument to a virtual register if we determined above 3757 // that we ran out of physical registers of the appropriate type. 3758 if (needsLoad) { 3759 int FI = MFI->CreateFixedObject(ObjSize, 3760 CurArgOffset + (ArgSize - ObjSize), 3761 isImmutable); 3762 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3763 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), 3764 false, false, false, 0); 3765 } 3766 3767 InVals.push_back(ArgVal); 3768 } 3769 3770 // Allow for Altivec parameters at the end, if needed. 3771 if (nAltivecParamsAtEnd) { 3772 MinReservedArea = ((MinReservedArea+15)/16)*16; 3773 MinReservedArea += 16*nAltivecParamsAtEnd; 3774 } 3775 3776 // Area that is at least reserved in the caller of this function. 3777 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize); 3778 3779 // Set the size that is at least reserved in caller of this function. Tail 3780 // call optimized functions' reserved stack space needs to be aligned so that 3781 // taking the difference between two stack areas will result in an aligned 3782 // stack. 3783 MinReservedArea = 3784 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3785 FuncInfo->setMinReservedArea(MinReservedArea); 3786 3787 // If the function takes variable number of arguments, make a frame index for 3788 // the start of the first vararg value... for expansion of llvm.va_start. 3789 if (isVarArg) { 3790 int Depth = ArgOffset; 3791 3792 FuncInfo->setVarArgsFrameIndex( 3793 MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, 3794 Depth, true)); 3795 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3796 3797 // If this function is vararg, store any remaining integer argument regs 3798 // to their spots on the stack so that they may be loaded by deferencing the 3799 // result of va_next. 3800 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { 3801 unsigned VReg; 3802 3803 if (isPPC64) 3804 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3805 else 3806 VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); 3807 3808 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3809 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3810 MachinePointerInfo(), false, false, 0); 3811 MemOps.push_back(Store); 3812 // Increment the address by four for the next argument to store 3813 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3814 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3815 } 3816 } 3817 3818 if (!MemOps.empty()) 3819 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3820 3821 return Chain; 3822 } 3823 3824 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be 3825 /// adjusted to accommodate the arguments for the tailcall. 3826 static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, 3827 unsigned ParamSize) { 3828 3829 if (!isTailCall) return 0; 3830 3831 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>(); 3832 unsigned CallerMinReservedArea = FI->getMinReservedArea(); 3833 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize; 3834 // Remember only if the new adjustement is bigger. 3835 if (SPDiff < FI->getTailCallSPDelta()) 3836 FI->setTailCallSPDelta(SPDiff); 3837 3838 return SPDiff; 3839 } 3840 3841 /// IsEligibleForTailCallOptimization - Check whether the call is eligible 3842 /// for tail call optimization. Targets which want to do tail call 3843 /// optimization should implement this function. 3844 bool 3845 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 3846 CallingConv::ID CalleeCC, 3847 bool isVarArg, 3848 const SmallVectorImpl<ISD::InputArg> &Ins, 3849 SelectionDAG& DAG) const { 3850 if (!getTargetMachine().Options.GuaranteedTailCallOpt) 3851 return false; 3852 3853 // Variable argument functions are not supported. 3854 if (isVarArg) 3855 return false; 3856 3857 MachineFunction &MF = DAG.getMachineFunction(); 3858 CallingConv::ID CallerCC = MF.getFunction()->getCallingConv(); 3859 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { 3860 // Functions containing by val parameters are not supported. 3861 for (unsigned i = 0; i != Ins.size(); i++) { 3862 ISD::ArgFlagsTy Flags = Ins[i].Flags; 3863 if (Flags.isByVal()) return false; 3864 } 3865 3866 // Non-PIC/GOT tail calls are supported. 3867 if (getTargetMachine().getRelocationModel() != Reloc::PIC_) 3868 return true; 3869 3870 // At the moment we can only do local tail calls (in same module, hidden 3871 // or protected) if we are generating PIC. 3872 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 3873 return G->getGlobal()->hasHiddenVisibility() 3874 || G->getGlobal()->hasProtectedVisibility(); 3875 } 3876 3877 return false; 3878 } 3879 3880 /// isCallCompatibleAddress - Return the immediate to use if the specified 3881 /// 32-bit value is representable in the immediate field of a BxA instruction. 3882 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { 3883 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 3884 if (!C) return nullptr; 3885 3886 int Addr = C->getZExtValue(); 3887 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. 3888 SignExtend32<26>(Addr) != Addr) 3889 return nullptr; // Top 6 bits have to be sext of immediate. 3890 3891 return DAG.getConstant((int)C->getZExtValue() >> 2, SDLoc(Op), 3892 DAG.getTargetLoweringInfo().getPointerTy( 3893 DAG.getDataLayout())).getNode(); 3894 } 3895 3896 namespace { 3897 3898 struct TailCallArgumentInfo { 3899 SDValue Arg; 3900 SDValue FrameIdxOp; 3901 int FrameIdx; 3902 3903 TailCallArgumentInfo() : FrameIdx(0) {} 3904 }; 3905 } 3906 3907 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. 3908 static void 3909 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, 3910 SDValue Chain, 3911 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs, 3912 SmallVectorImpl<SDValue> &MemOpChains, 3913 SDLoc dl) { 3914 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { 3915 SDValue Arg = TailCallArgs[i].Arg; 3916 SDValue FIN = TailCallArgs[i].FrameIdxOp; 3917 int FI = TailCallArgs[i].FrameIdx; 3918 // Store relative to framepointer. 3919 MemOpChains.push_back(DAG.getStore( 3920 Chain, dl, Arg, FIN, 3921 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), false, 3922 false, 0)); 3923 } 3924 } 3925 3926 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to 3927 /// the appropriate stack slot for the tail call optimized function call. 3928 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, 3929 MachineFunction &MF, 3930 SDValue Chain, 3931 SDValue OldRetAddr, 3932 SDValue OldFP, 3933 int SPDiff, 3934 bool isPPC64, 3935 bool isDarwinABI, 3936 SDLoc dl) { 3937 if (SPDiff) { 3938 // Calculate the new stack slot for the return address. 3939 int SlotSize = isPPC64 ? 8 : 4; 3940 const PPCFrameLowering *FL = 3941 MF.getSubtarget<PPCSubtarget>().getFrameLowering(); 3942 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset(); 3943 int NewRetAddr = MF.getFrameInfo()->CreateFixedObject(SlotSize, 3944 NewRetAddrLoc, true); 3945 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 3946 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); 3947 Chain = DAG.getStore( 3948 Chain, dl, OldRetAddr, NewRetAddrFrIdx, 3949 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewRetAddr), 3950 false, false, 0); 3951 3952 // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack 3953 // slot as the FP is never overwritten. 3954 if (isDarwinABI) { 3955 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset(); 3956 int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc, 3957 true); 3958 SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); 3959 Chain = DAG.getStore( 3960 Chain, dl, OldFP, NewFramePtrIdx, 3961 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewFPIdx), 3962 false, false, 0); 3963 } 3964 } 3965 return Chain; 3966 } 3967 3968 /// CalculateTailCallArgDest - Remember Argument for later processing. Calculate 3969 /// the position of the argument. 3970 static void 3971 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, 3972 SDValue Arg, int SPDiff, unsigned ArgOffset, 3973 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) { 3974 int Offset = ArgOffset + SPDiff; 3975 uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; 3976 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true); 3977 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 3978 SDValue FIN = DAG.getFrameIndex(FI, VT); 3979 TailCallArgumentInfo Info; 3980 Info.Arg = Arg; 3981 Info.FrameIdxOp = FIN; 3982 Info.FrameIdx = FI; 3983 TailCallArguments.push_back(Info); 3984 } 3985 3986 /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address 3987 /// stack slot. Returns the chain as result and the loaded frame pointers in 3988 /// LROpOut/FPOpout. Used when tail calling. 3989 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, 3990 int SPDiff, 3991 SDValue Chain, 3992 SDValue &LROpOut, 3993 SDValue &FPOpOut, 3994 bool isDarwinABI, 3995 SDLoc dl) const { 3996 if (SPDiff) { 3997 // Load the LR and FP stack slot for later adjusting. 3998 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 3999 LROpOut = getReturnAddrFrameIndex(DAG); 4000 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(), 4001 false, false, false, 0); 4002 Chain = SDValue(LROpOut.getNode(), 1); 4003 4004 // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack 4005 // slot as the FP is never overwritten. 4006 if (isDarwinABI) { 4007 FPOpOut = getFramePointerFrameIndex(DAG); 4008 FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo(), 4009 false, false, false, 0); 4010 Chain = SDValue(FPOpOut.getNode(), 1); 4011 } 4012 } 4013 return Chain; 4014 } 4015 4016 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified 4017 /// by "Src" to address "Dst" of size "Size". Alignment information is 4018 /// specified by the specific parameter attribute. The copy will be passed as 4019 /// a byval function parameter. 4020 /// Sometimes what we are copying is the end of a larger object, the part that 4021 /// does not fit in registers. 4022 static SDValue 4023 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, 4024 ISD::ArgFlagsTy Flags, SelectionDAG &DAG, 4025 SDLoc dl) { 4026 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); 4027 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), 4028 false, false, false, MachinePointerInfo(), 4029 MachinePointerInfo()); 4030 } 4031 4032 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of 4033 /// tail calls. 4034 static void 4035 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, 4036 SDValue Arg, SDValue PtrOff, int SPDiff, 4037 unsigned ArgOffset, bool isPPC64, bool isTailCall, 4038 bool isVector, SmallVectorImpl<SDValue> &MemOpChains, 4039 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, 4040 SDLoc dl) { 4041 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4042 if (!isTailCall) { 4043 if (isVector) { 4044 SDValue StackPtr; 4045 if (isPPC64) 4046 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4047 else 4048 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4049 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 4050 DAG.getConstant(ArgOffset, dl, PtrVT)); 4051 } 4052 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 4053 MachinePointerInfo(), false, false, 0)); 4054 // Calculate and remember argument location. 4055 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset, 4056 TailCallArguments); 4057 } 4058 4059 static 4060 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, 4061 SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, 4062 SDValue LROp, SDValue FPOp, bool isDarwinABI, 4063 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) { 4064 MachineFunction &MF = DAG.getMachineFunction(); 4065 4066 // Emit a sequence of copyto/copyfrom virtual registers for arguments that 4067 // might overwrite each other in case of tail call optimization. 4068 SmallVector<SDValue, 8> MemOpChains2; 4069 // Do not flag preceding copytoreg stuff together with the following stuff. 4070 InFlag = SDValue(); 4071 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, 4072 MemOpChains2, dl); 4073 if (!MemOpChains2.empty()) 4074 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); 4075 4076 // Store the return address to the appropriate stack slot. 4077 Chain = EmitTailCallStoreFPAndRetAddr(DAG, MF, Chain, LROp, FPOp, SPDiff, 4078 isPPC64, isDarwinABI, dl); 4079 4080 // Emit callseq_end just before tailcall node. 4081 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4082 DAG.getIntPtrConstant(0, dl, true), InFlag, dl); 4083 InFlag = Chain.getValue(1); 4084 } 4085 4086 // Is this global address that of a function that can be called by name? (as 4087 // opposed to something that must hold a descriptor for an indirect call). 4088 static bool isFunctionGlobalAddress(SDValue Callee) { 4089 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 4090 if (Callee.getOpcode() == ISD::GlobalTLSAddress || 4091 Callee.getOpcode() == ISD::TargetGlobalTLSAddress) 4092 return false; 4093 4094 return G->getGlobal()->getType()->getElementType()->isFunctionTy(); 4095 } 4096 4097 return false; 4098 } 4099 4100 static 4101 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, 4102 SDValue &Chain, SDValue CallSeqStart, SDLoc dl, int SPDiff, 4103 bool isTailCall, bool IsPatchPoint, bool hasNest, 4104 SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass, 4105 SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys, 4106 ImmutableCallSite *CS, const PPCSubtarget &Subtarget) { 4107 4108 bool isPPC64 = Subtarget.isPPC64(); 4109 bool isSVR4ABI = Subtarget.isSVR4ABI(); 4110 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4111 4112 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4113 NodeTys.push_back(MVT::Other); // Returns a chain 4114 NodeTys.push_back(MVT::Glue); // Returns a flag for retval copy to use. 4115 4116 unsigned CallOpc = PPCISD::CALL; 4117 4118 bool needIndirectCall = true; 4119 if (!isSVR4ABI || !isPPC64) 4120 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) { 4121 // If this is an absolute destination address, use the munged value. 4122 Callee = SDValue(Dest, 0); 4123 needIndirectCall = false; 4124 } 4125 4126 if (isFunctionGlobalAddress(Callee)) { 4127 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee); 4128 // A call to a TLS address is actually an indirect call to a 4129 // thread-specific pointer. 4130 unsigned OpFlags = 0; 4131 if ((DAG.getTarget().getRelocationModel() != Reloc::Static && 4132 (Subtarget.getTargetTriple().isMacOSX() && 4133 Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5)) && 4134 !G->getGlobal()->isStrongDefinitionForLinker()) || 4135 (Subtarget.isTargetELF() && !isPPC64 && 4136 !G->getGlobal()->hasLocalLinkage() && 4137 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) { 4138 // PC-relative references to external symbols should go through $stub, 4139 // unless we're building with the leopard linker or later, which 4140 // automatically synthesizes these stubs. 4141 OpFlags = PPCII::MO_PLT_OR_STUB; 4142 } 4143 4144 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, 4145 // every direct call is) turn it into a TargetGlobalAddress / 4146 // TargetExternalSymbol node so that legalize doesn't hack it. 4147 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, 4148 Callee.getValueType(), 0, OpFlags); 4149 needIndirectCall = false; 4150 } 4151 4152 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 4153 unsigned char OpFlags = 0; 4154 4155 if ((DAG.getTarget().getRelocationModel() != Reloc::Static && 4156 (Subtarget.getTargetTriple().isMacOSX() && 4157 Subtarget.getTargetTriple().isMacOSXVersionLT(10, 5))) || 4158 (Subtarget.isTargetELF() && !isPPC64 && 4159 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) { 4160 // PC-relative references to external symbols should go through $stub, 4161 // unless we're building with the leopard linker or later, which 4162 // automatically synthesizes these stubs. 4163 OpFlags = PPCII::MO_PLT_OR_STUB; 4164 } 4165 4166 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), 4167 OpFlags); 4168 needIndirectCall = false; 4169 } 4170 4171 if (IsPatchPoint) { 4172 // We'll form an invalid direct call when lowering a patchpoint; the full 4173 // sequence for an indirect call is complicated, and many of the 4174 // instructions introduced might have side effects (and, thus, can't be 4175 // removed later). The call itself will be removed as soon as the 4176 // argument/return lowering is complete, so the fact that it has the wrong 4177 // kind of operands should not really matter. 4178 needIndirectCall = false; 4179 } 4180 4181 if (needIndirectCall) { 4182 // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair 4183 // to do the call, we can't use PPCISD::CALL. 4184 SDValue MTCTROps[] = {Chain, Callee, InFlag}; 4185 4186 if (isSVR4ABI && isPPC64 && !isELFv2ABI) { 4187 // Function pointers in the 64-bit SVR4 ABI do not point to the function 4188 // entry point, but to the function descriptor (the function entry point 4189 // address is part of the function descriptor though). 4190 // The function descriptor is a three doubleword structure with the 4191 // following fields: function entry point, TOC base address and 4192 // environment pointer. 4193 // Thus for a call through a function pointer, the following actions need 4194 // to be performed: 4195 // 1. Save the TOC of the caller in the TOC save area of its stack 4196 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). 4197 // 2. Load the address of the function entry point from the function 4198 // descriptor. 4199 // 3. Load the TOC of the callee from the function descriptor into r2. 4200 // 4. Load the environment pointer from the function descriptor into 4201 // r11. 4202 // 5. Branch to the function entry point address. 4203 // 6. On return of the callee, the TOC of the caller needs to be 4204 // restored (this is done in FinishCall()). 4205 // 4206 // The loads are scheduled at the beginning of the call sequence, and the 4207 // register copies are flagged together to ensure that no other 4208 // operations can be scheduled in between. E.g. without flagging the 4209 // copies together, a TOC access in the caller could be scheduled between 4210 // the assignment of the callee TOC and the branch to the callee, which 4211 // results in the TOC access going through the TOC of the callee instead 4212 // of going through the TOC of the caller, which leads to incorrect code. 4213 4214 // Load the address of the function entry point from the function 4215 // descriptor. 4216 SDValue LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-1); 4217 if (LDChain.getValueType() == MVT::Glue) 4218 LDChain = CallSeqStart.getValue(CallSeqStart->getNumValues()-2); 4219 4220 bool LoadsInv = Subtarget.hasInvariantFunctionDescriptors(); 4221 4222 MachinePointerInfo MPI(CS ? CS->getCalledValue() : nullptr); 4223 SDValue LoadFuncPtr = DAG.getLoad(MVT::i64, dl, LDChain, Callee, MPI, 4224 false, false, LoadsInv, 8); 4225 4226 // Load environment pointer into r11. 4227 SDValue PtrOff = DAG.getIntPtrConstant(16, dl); 4228 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); 4229 SDValue LoadEnvPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddPtr, 4230 MPI.getWithOffset(16), false, false, 4231 LoadsInv, 8); 4232 4233 SDValue TOCOff = DAG.getIntPtrConstant(8, dl); 4234 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, TOCOff); 4235 SDValue TOCPtr = DAG.getLoad(MVT::i64, dl, LDChain, AddTOC, 4236 MPI.getWithOffset(8), false, false, 4237 LoadsInv, 8); 4238 4239 setUsesTOCBasePtr(DAG); 4240 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, PPC::X2, TOCPtr, 4241 InFlag); 4242 Chain = TOCVal.getValue(0); 4243 InFlag = TOCVal.getValue(1); 4244 4245 // If the function call has an explicit 'nest' parameter, it takes the 4246 // place of the environment pointer. 4247 if (!hasNest) { 4248 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, 4249 InFlag); 4250 4251 Chain = EnvVal.getValue(0); 4252 InFlag = EnvVal.getValue(1); 4253 } 4254 4255 MTCTROps[0] = Chain; 4256 MTCTROps[1] = LoadFuncPtr; 4257 MTCTROps[2] = InFlag; 4258 } 4259 4260 Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, 4261 makeArrayRef(MTCTROps, InFlag.getNode() ? 3 : 2)); 4262 InFlag = Chain.getValue(1); 4263 4264 NodeTys.clear(); 4265 NodeTys.push_back(MVT::Other); 4266 NodeTys.push_back(MVT::Glue); 4267 Ops.push_back(Chain); 4268 CallOpc = PPCISD::BCTRL; 4269 Callee.setNode(nullptr); 4270 // Add use of X11 (holding environment pointer) 4271 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest) 4272 Ops.push_back(DAG.getRegister(PPC::X11, PtrVT)); 4273 // Add CTR register as callee so a bctr can be emitted later. 4274 if (isTailCall) 4275 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::CTR8 : PPC::CTR, PtrVT)); 4276 } 4277 4278 // If this is a direct call, pass the chain and the callee. 4279 if (Callee.getNode()) { 4280 Ops.push_back(Chain); 4281 Ops.push_back(Callee); 4282 } 4283 // If this is a tail call add stack pointer delta. 4284 if (isTailCall) 4285 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32)); 4286 4287 // Add argument registers to the end of the list so that they are known live 4288 // into the call. 4289 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) 4290 Ops.push_back(DAG.getRegister(RegsToPass[i].first, 4291 RegsToPass[i].second.getValueType())); 4292 4293 // All calls, in both the ELF V1 and V2 ABIs, need the TOC register live 4294 // into the call. 4295 if (isSVR4ABI && isPPC64 && !IsPatchPoint) { 4296 setUsesTOCBasePtr(DAG); 4297 Ops.push_back(DAG.getRegister(PPC::X2, PtrVT)); 4298 } 4299 4300 return CallOpc; 4301 } 4302 4303 static 4304 bool isLocalCall(const SDValue &Callee) 4305 { 4306 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) 4307 return G->getGlobal()->isStrongDefinitionForLinker(); 4308 return false; 4309 } 4310 4311 SDValue 4312 PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 4313 CallingConv::ID CallConv, bool isVarArg, 4314 const SmallVectorImpl<ISD::InputArg> &Ins, 4315 SDLoc dl, SelectionDAG &DAG, 4316 SmallVectorImpl<SDValue> &InVals) const { 4317 4318 SmallVector<CCValAssign, 16> RVLocs; 4319 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 4320 *DAG.getContext()); 4321 CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC); 4322 4323 // Copy all of the result registers out of their specified physreg. 4324 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { 4325 CCValAssign &VA = RVLocs[i]; 4326 assert(VA.isRegLoc() && "Can only return in registers!"); 4327 4328 SDValue Val = DAG.getCopyFromReg(Chain, dl, 4329 VA.getLocReg(), VA.getLocVT(), InFlag); 4330 Chain = Val.getValue(1); 4331 InFlag = Val.getValue(2); 4332 4333 switch (VA.getLocInfo()) { 4334 default: llvm_unreachable("Unknown loc info!"); 4335 case CCValAssign::Full: break; 4336 case CCValAssign::AExt: 4337 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4338 break; 4339 case CCValAssign::ZExt: 4340 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, 4341 DAG.getValueType(VA.getValVT())); 4342 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4343 break; 4344 case CCValAssign::SExt: 4345 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, 4346 DAG.getValueType(VA.getValVT())); 4347 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 4348 break; 4349 } 4350 4351 InVals.push_back(Val); 4352 } 4353 4354 return Chain; 4355 } 4356 4357 SDValue 4358 PPCTargetLowering::FinishCall(CallingConv::ID CallConv, SDLoc dl, 4359 bool isTailCall, bool isVarArg, bool IsPatchPoint, 4360 bool hasNest, SelectionDAG &DAG, 4361 SmallVector<std::pair<unsigned, SDValue>, 8> 4362 &RegsToPass, 4363 SDValue InFlag, SDValue Chain, 4364 SDValue CallSeqStart, SDValue &Callee, 4365 int SPDiff, unsigned NumBytes, 4366 const SmallVectorImpl<ISD::InputArg> &Ins, 4367 SmallVectorImpl<SDValue> &InVals, 4368 ImmutableCallSite *CS) const { 4369 4370 std::vector<EVT> NodeTys; 4371 SmallVector<SDValue, 8> Ops; 4372 unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl, 4373 SPDiff, isTailCall, IsPatchPoint, hasNest, 4374 RegsToPass, Ops, NodeTys, CS, Subtarget); 4375 4376 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls 4377 if (isVarArg && Subtarget.isSVR4ABI() && !Subtarget.isPPC64()) 4378 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); 4379 4380 // When performing tail call optimization the callee pops its arguments off 4381 // the stack. Account for this here so these bytes can be pushed back on in 4382 // PPCFrameLowering::eliminateCallFramePseudoInstr. 4383 int BytesCalleePops = 4384 (CallConv == CallingConv::Fast && 4385 getTargetMachine().Options.GuaranteedTailCallOpt) ? NumBytes : 0; 4386 4387 // Add a register mask operand representing the call-preserved registers. 4388 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 4389 const uint32_t *Mask = 4390 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv); 4391 assert(Mask && "Missing call preserved mask for calling convention"); 4392 Ops.push_back(DAG.getRegisterMask(Mask)); 4393 4394 if (InFlag.getNode()) 4395 Ops.push_back(InFlag); 4396 4397 // Emit tail call. 4398 if (isTailCall) { 4399 assert(((Callee.getOpcode() == ISD::Register && 4400 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) || 4401 Callee.getOpcode() == ISD::TargetExternalSymbol || 4402 Callee.getOpcode() == ISD::TargetGlobalAddress || 4403 isa<ConstantSDNode>(Callee)) && 4404 "Expecting an global address, external symbol, absolute value or register"); 4405 4406 DAG.getMachineFunction().getFrameInfo()->setHasTailCall(); 4407 return DAG.getNode(PPCISD::TC_RETURN, dl, MVT::Other, Ops); 4408 } 4409 4410 // Add a NOP immediately after the branch instruction when using the 64-bit 4411 // SVR4 ABI. At link time, if caller and callee are in a different module and 4412 // thus have a different TOC, the call will be replaced with a call to a stub 4413 // function which saves the current TOC, loads the TOC of the callee and 4414 // branches to the callee. The NOP will be replaced with a load instruction 4415 // which restores the TOC of the caller from the TOC save slot of the current 4416 // stack frame. If caller and callee belong to the same module (and have the 4417 // same TOC), the NOP will remain unchanged. 4418 4419 if (!isTailCall && Subtarget.isSVR4ABI()&& Subtarget.isPPC64() && 4420 !IsPatchPoint) { 4421 if (CallOpc == PPCISD::BCTRL) { 4422 // This is a call through a function pointer. 4423 // Restore the caller TOC from the save area into R2. 4424 // See PrepareCall() for more information about calls through function 4425 // pointers in the 64-bit SVR4 ABI. 4426 // We are using a target-specific load with r2 hard coded, because the 4427 // result of a target-independent load would never go directly into r2, 4428 // since r2 is a reserved register (which prevents the register allocator 4429 // from allocating it), resulting in an additional register being 4430 // allocated and an unnecessary move instruction being generated. 4431 CallOpc = PPCISD::BCTRL_LOAD_TOC; 4432 4433 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4434 SDValue StackPtr = DAG.getRegister(PPC::X1, PtrVT); 4435 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 4436 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 4437 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, TOCOff); 4438 4439 // The address needs to go after the chain input but before the flag (or 4440 // any other variadic arguments). 4441 Ops.insert(std::next(Ops.begin()), AddTOC); 4442 } else if ((CallOpc == PPCISD::CALL) && 4443 (!isLocalCall(Callee) || 4444 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) 4445 // Otherwise insert NOP for non-local calls. 4446 CallOpc = PPCISD::CALL_NOP; 4447 } 4448 4449 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops); 4450 InFlag = Chain.getValue(1); 4451 4452 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4453 DAG.getIntPtrConstant(BytesCalleePops, dl, true), 4454 InFlag, dl); 4455 if (!Ins.empty()) 4456 InFlag = Chain.getValue(1); 4457 4458 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, 4459 Ins, dl, DAG, InVals); 4460 } 4461 4462 SDValue 4463 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 4464 SmallVectorImpl<SDValue> &InVals) const { 4465 SelectionDAG &DAG = CLI.DAG; 4466 SDLoc &dl = CLI.DL; 4467 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 4468 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 4469 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 4470 SDValue Chain = CLI.Chain; 4471 SDValue Callee = CLI.Callee; 4472 bool &isTailCall = CLI.IsTailCall; 4473 CallingConv::ID CallConv = CLI.CallConv; 4474 bool isVarArg = CLI.IsVarArg; 4475 bool IsPatchPoint = CLI.IsPatchPoint; 4476 ImmutableCallSite *CS = CLI.CS; 4477 4478 if (isTailCall) 4479 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, 4480 Ins, DAG); 4481 4482 if (!isTailCall && CS && CS->isMustTailCall()) 4483 report_fatal_error("failed to perform tail call elimination on a call " 4484 "site marked musttail"); 4485 4486 if (Subtarget.isSVR4ABI()) { 4487 if (Subtarget.isPPC64()) 4488 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, 4489 isTailCall, IsPatchPoint, Outs, OutVals, Ins, 4490 dl, DAG, InVals, CS); 4491 else 4492 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, 4493 isTailCall, IsPatchPoint, Outs, OutVals, Ins, 4494 dl, DAG, InVals, CS); 4495 } 4496 4497 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, 4498 isTailCall, IsPatchPoint, Outs, OutVals, Ins, 4499 dl, DAG, InVals, CS); 4500 } 4501 4502 SDValue 4503 PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee, 4504 CallingConv::ID CallConv, bool isVarArg, 4505 bool isTailCall, bool IsPatchPoint, 4506 const SmallVectorImpl<ISD::OutputArg> &Outs, 4507 const SmallVectorImpl<SDValue> &OutVals, 4508 const SmallVectorImpl<ISD::InputArg> &Ins, 4509 SDLoc dl, SelectionDAG &DAG, 4510 SmallVectorImpl<SDValue> &InVals, 4511 ImmutableCallSite *CS) const { 4512 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description 4513 // of the 32-bit SVR4 ABI stack frame layout. 4514 4515 assert((CallConv == CallingConv::C || 4516 CallConv == CallingConv::Fast) && "Unknown calling convention!"); 4517 4518 unsigned PtrByteSize = 4; 4519 4520 MachineFunction &MF = DAG.getMachineFunction(); 4521 4522 // Mark this function as potentially containing a function that contains a 4523 // tail call. As a consequence the frame pointer will be used for dynamicalloc 4524 // and restoring the callers stack pointer in this functions epilog. This is 4525 // done because by tail calling the called function might overwrite the value 4526 // in this function's (MF) stack pointer stack slot 0(SP). 4527 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4528 CallConv == CallingConv::Fast) 4529 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 4530 4531 // Count how many bytes are to be pushed on the stack, including the linkage 4532 // area, parameter list area and the part of the local variable space which 4533 // contains copies of aggregates which are passed by value. 4534 4535 // Assign locations to all of the outgoing arguments. 4536 SmallVector<CCValAssign, 16> ArgLocs; 4537 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 4538 *DAG.getContext()); 4539 4540 // Reserve space for the linkage area on the stack. 4541 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(), 4542 PtrByteSize); 4543 4544 if (isVarArg) { 4545 // Handle fixed and variable vector arguments differently. 4546 // Fixed vector arguments go into registers as long as registers are 4547 // available. Variable vector arguments always go into memory. 4548 unsigned NumArgs = Outs.size(); 4549 4550 for (unsigned i = 0; i != NumArgs; ++i) { 4551 MVT ArgVT = Outs[i].VT; 4552 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 4553 bool Result; 4554 4555 if (Outs[i].IsFixed) { 4556 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, 4557 CCInfo); 4558 } else { 4559 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, 4560 ArgFlags, CCInfo); 4561 } 4562 4563 if (Result) { 4564 #ifndef NDEBUG 4565 errs() << "Call operand #" << i << " has unhandled type " 4566 << EVT(ArgVT).getEVTString() << "\n"; 4567 #endif 4568 llvm_unreachable(nullptr); 4569 } 4570 } 4571 } else { 4572 // All arguments are treated the same. 4573 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4); 4574 } 4575 4576 // Assign locations to all of the outgoing aggregate by value arguments. 4577 SmallVector<CCValAssign, 16> ByValArgLocs; 4578 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 4579 ByValArgLocs, *DAG.getContext()); 4580 4581 // Reserve stack space for the allocations in CCInfo. 4582 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 4583 4584 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal); 4585 4586 // Size of the linkage area, parameter list area and the part of the local 4587 // space variable where copies of aggregates which are passed by value are 4588 // stored. 4589 unsigned NumBytes = CCByValInfo.getNextStackOffset(); 4590 4591 // Calculate by how many bytes the stack has to be adjusted in case of tail 4592 // call optimization. 4593 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 4594 4595 // Adjust the stack pointer for the new arguments... 4596 // These operations are automatically eliminated by the prolog/epilog pass 4597 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4598 dl); 4599 SDValue CallSeqStart = Chain; 4600 4601 // Load the return address and frame pointer so it can be moved somewhere else 4602 // later. 4603 SDValue LROp, FPOp; 4604 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, false, 4605 dl); 4606 4607 // Set up a copy of the stack pointer for use loading and storing any 4608 // arguments that may not fit in the registers available for argument 4609 // passing. 4610 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 4611 4612 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4613 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4614 SmallVector<SDValue, 8> MemOpChains; 4615 4616 bool seenFloatArg = false; 4617 // Walk the register/memloc assignments, inserting copies/loads. 4618 for (unsigned i = 0, j = 0, e = ArgLocs.size(); 4619 i != e; 4620 ++i) { 4621 CCValAssign &VA = ArgLocs[i]; 4622 SDValue Arg = OutVals[i]; 4623 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4624 4625 if (Flags.isByVal()) { 4626 // Argument is an aggregate which is passed by value, thus we need to 4627 // create a copy of it in the local variable space of the current stack 4628 // frame (which is the stack frame of the caller) and pass the address of 4629 // this copy to the callee. 4630 assert((j < ByValArgLocs.size()) && "Index out of bounds!"); 4631 CCValAssign &ByValVA = ByValArgLocs[j++]; 4632 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); 4633 4634 // Memory reserved in the local variable space of the callers stack frame. 4635 unsigned LocMemOffset = ByValVA.getLocMemOffset(); 4636 4637 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 4638 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 4639 StackPtr, PtrOff); 4640 4641 // Create a copy of the argument in the local area of the current 4642 // stack frame. 4643 SDValue MemcpyCall = 4644 CreateCopyOfByValArgument(Arg, PtrOff, 4645 CallSeqStart.getNode()->getOperand(0), 4646 Flags, DAG, dl); 4647 4648 // This must go outside the CALLSEQ_START..END. 4649 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 4650 CallSeqStart.getNode()->getOperand(1), 4651 SDLoc(MemcpyCall)); 4652 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 4653 NewCallSeqStart.getNode()); 4654 Chain = CallSeqStart = NewCallSeqStart; 4655 4656 // Pass the address of the aggregate copy on the stack either in a 4657 // physical register or in the parameter list area of the current stack 4658 // frame to the callee. 4659 Arg = PtrOff; 4660 } 4661 4662 if (VA.isRegLoc()) { 4663 if (Arg.getValueType() == MVT::i1) 4664 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Arg); 4665 4666 seenFloatArg |= VA.getLocVT().isFloatingPoint(); 4667 // Put argument in a physical register. 4668 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 4669 } else { 4670 // Put argument in the parameter list area of the current stack frame. 4671 assert(VA.isMemLoc()); 4672 unsigned LocMemOffset = VA.getLocMemOffset(); 4673 4674 if (!isTailCall) { 4675 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); 4676 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()), 4677 StackPtr, PtrOff); 4678 4679 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, 4680 MachinePointerInfo(), 4681 false, false, 0)); 4682 } else { 4683 // Calculate and remember argument location. 4684 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset, 4685 TailCallArguments); 4686 } 4687 } 4688 } 4689 4690 if (!MemOpChains.empty()) 4691 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 4692 4693 // Build a sequence of copy-to-reg nodes chained together with token chain 4694 // and flag operands which copy the outgoing args into the appropriate regs. 4695 SDValue InFlag; 4696 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 4697 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 4698 RegsToPass[i].second, InFlag); 4699 InFlag = Chain.getValue(1); 4700 } 4701 4702 // Set CR bit 6 to true if this is a vararg call with floating args passed in 4703 // registers. 4704 if (isVarArg) { 4705 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue); 4706 SDValue Ops[] = { Chain, InFlag }; 4707 4708 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET, 4709 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1)); 4710 4711 InFlag = Chain.getValue(1); 4712 } 4713 4714 if (isTailCall) 4715 PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp, 4716 false, TailCallArguments); 4717 4718 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, 4719 /* unused except on PPC64 ELFv1 */ false, DAG, 4720 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 4721 NumBytes, Ins, InVals, CS); 4722 } 4723 4724 // Copy an argument into memory, being careful to do this outside the 4725 // call sequence for the call to which the argument belongs. 4726 SDValue 4727 PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff, 4728 SDValue CallSeqStart, 4729 ISD::ArgFlagsTy Flags, 4730 SelectionDAG &DAG, 4731 SDLoc dl) const { 4732 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, 4733 CallSeqStart.getNode()->getOperand(0), 4734 Flags, DAG, dl); 4735 // The MEMCPY must go outside the CALLSEQ_START..END. 4736 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, 4737 CallSeqStart.getNode()->getOperand(1), 4738 SDLoc(MemcpyCall)); 4739 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), 4740 NewCallSeqStart.getNode()); 4741 return NewCallSeqStart; 4742 } 4743 4744 SDValue 4745 PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee, 4746 CallingConv::ID CallConv, bool isVarArg, 4747 bool isTailCall, bool IsPatchPoint, 4748 const SmallVectorImpl<ISD::OutputArg> &Outs, 4749 const SmallVectorImpl<SDValue> &OutVals, 4750 const SmallVectorImpl<ISD::InputArg> &Ins, 4751 SDLoc dl, SelectionDAG &DAG, 4752 SmallVectorImpl<SDValue> &InVals, 4753 ImmutableCallSite *CS) const { 4754 4755 bool isELFv2ABI = Subtarget.isELFv2ABI(); 4756 bool isLittleEndian = Subtarget.isLittleEndian(); 4757 unsigned NumOps = Outs.size(); 4758 bool hasNest = false; 4759 4760 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 4761 unsigned PtrByteSize = 8; 4762 4763 MachineFunction &MF = DAG.getMachineFunction(); 4764 4765 // Mark this function as potentially containing a function that contains a 4766 // tail call. As a consequence the frame pointer will be used for dynamicalloc 4767 // and restoring the callers stack pointer in this functions epilog. This is 4768 // done because by tail calling the called function might overwrite the value 4769 // in this function's (MF) stack pointer stack slot 0(SP). 4770 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4771 CallConv == CallingConv::Fast) 4772 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 4773 4774 assert(!(CallConv == CallingConv::Fast && isVarArg) && 4775 "fastcc not supported on varargs functions"); 4776 4777 // Count how many bytes are to be pushed on the stack, including the linkage 4778 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes 4779 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage 4780 // area is 32 bytes reserved space for [SP][CR][LR][TOC]. 4781 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4782 unsigned NumBytes = LinkageSize; 4783 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 4784 unsigned &QFPR_idx = FPR_idx; 4785 4786 static const MCPhysReg GPR[] = { 4787 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4788 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4789 }; 4790 static const MCPhysReg VR[] = { 4791 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4792 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4793 }; 4794 static const MCPhysReg VSRH[] = { 4795 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8, 4796 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13 4797 }; 4798 4799 const unsigned NumGPRs = array_lengthof(GPR); 4800 const unsigned NumFPRs = 13; 4801 const unsigned NumVRs = array_lengthof(VR); 4802 const unsigned NumQFPRs = NumFPRs; 4803 4804 // When using the fast calling convention, we don't provide backing for 4805 // arguments that will be in registers. 4806 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0; 4807 4808 // Add up all the space actually used. 4809 for (unsigned i = 0; i != NumOps; ++i) { 4810 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4811 EVT ArgVT = Outs[i].VT; 4812 EVT OrigVT = Outs[i].ArgVT; 4813 4814 if (Flags.isNest()) 4815 continue; 4816 4817 if (CallConv == CallingConv::Fast) { 4818 if (Flags.isByVal()) 4819 NumGPRsUsed += (Flags.getByValSize()+7)/8; 4820 else 4821 switch (ArgVT.getSimpleVT().SimpleTy) { 4822 default: llvm_unreachable("Unexpected ValueType for argument!"); 4823 case MVT::i1: 4824 case MVT::i32: 4825 case MVT::i64: 4826 if (++NumGPRsUsed <= NumGPRs) 4827 continue; 4828 break; 4829 case MVT::v4i32: 4830 case MVT::v8i16: 4831 case MVT::v16i8: 4832 case MVT::v2f64: 4833 case MVT::v2i64: 4834 case MVT::v1i128: 4835 if (++NumVRsUsed <= NumVRs) 4836 continue; 4837 break; 4838 case MVT::v4f32: 4839 // When using QPX, this is handled like a FP register, otherwise, it 4840 // is an Altivec register. 4841 if (Subtarget.hasQPX()) { 4842 if (++NumFPRsUsed <= NumFPRs) 4843 continue; 4844 } else { 4845 if (++NumVRsUsed <= NumVRs) 4846 continue; 4847 } 4848 break; 4849 case MVT::f32: 4850 case MVT::f64: 4851 case MVT::v4f64: // QPX 4852 case MVT::v4i1: // QPX 4853 if (++NumFPRsUsed <= NumFPRs) 4854 continue; 4855 break; 4856 } 4857 } 4858 4859 /* Respect alignment of argument on the stack. */ 4860 unsigned Align = 4861 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 4862 NumBytes = ((NumBytes + Align - 1) / Align) * Align; 4863 4864 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 4865 if (Flags.isInConsecutiveRegsLast()) 4866 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4867 } 4868 4869 unsigned NumBytesActuallyUsed = NumBytes; 4870 4871 // The prolog code of the callee may store up to 8 GPR argument registers to 4872 // the stack, allowing va_start to index over them in memory if its varargs. 4873 // Because we cannot tell if this is needed on the caller side, we have to 4874 // conservatively assume that it is needed. As such, make sure we have at 4875 // least enough stack space for the caller to store the 8 GPRs. 4876 // FIXME: On ELFv2, it may be unnecessary to allocate the parameter area. 4877 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 4878 4879 // Tail call needs the stack to be aligned. 4880 if (getTargetMachine().Options.GuaranteedTailCallOpt && 4881 CallConv == CallingConv::Fast) 4882 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 4883 4884 // Calculate by how many bytes the stack has to be adjusted in case of tail 4885 // call optimization. 4886 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 4887 4888 // To protect arguments on the stack from being clobbered in a tail call, 4889 // force all the loads to happen before doing any other lowering. 4890 if (isTailCall) 4891 Chain = DAG.getStackArgumentTokenFactor(Chain); 4892 4893 // Adjust the stack pointer for the new arguments... 4894 // These operations are automatically eliminated by the prolog/epilog pass 4895 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 4896 dl); 4897 SDValue CallSeqStart = Chain; 4898 4899 // Load the return address and frame pointer so it can be move somewhere else 4900 // later. 4901 SDValue LROp, FPOp; 4902 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 4903 dl); 4904 4905 // Set up a copy of the stack pointer for use loading and storing any 4906 // arguments that may not fit in the registers available for argument 4907 // passing. 4908 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 4909 4910 // Figure out which arguments are going to go in registers, and which in 4911 // memory. Also, if this is a vararg function, floating point operations 4912 // must be stored to our stack, and loaded into integer regs as well, if 4913 // any integer regs are available for argument passing. 4914 unsigned ArgOffset = LinkageSize; 4915 4916 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 4917 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 4918 4919 SmallVector<SDValue, 8> MemOpChains; 4920 for (unsigned i = 0; i != NumOps; ++i) { 4921 SDValue Arg = OutVals[i]; 4922 ISD::ArgFlagsTy Flags = Outs[i].Flags; 4923 EVT ArgVT = Outs[i].VT; 4924 EVT OrigVT = Outs[i].ArgVT; 4925 4926 // PtrOff will be used to store the current argument to the stack if a 4927 // register cannot be found for it. 4928 SDValue PtrOff; 4929 4930 // We re-align the argument offset for each argument, except when using the 4931 // fast calling convention, when we need to make sure we do that only when 4932 // we'll actually use a stack slot. 4933 auto ComputePtrOff = [&]() { 4934 /* Respect alignment of argument on the stack. */ 4935 unsigned Align = 4936 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 4937 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 4938 4939 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 4940 4941 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 4942 }; 4943 4944 if (CallConv != CallingConv::Fast) { 4945 ComputePtrOff(); 4946 4947 /* Compute GPR index associated with argument offset. */ 4948 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 4949 GPR_idx = std::min(GPR_idx, NumGPRs); 4950 } 4951 4952 // Promote integers to 64-bit values. 4953 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) { 4954 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 4955 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 4956 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 4957 } 4958 4959 // FIXME memcpy is used way more than necessary. Correctness first. 4960 // Note: "by value" is code for passing a structure by value, not 4961 // basic types. 4962 if (Flags.isByVal()) { 4963 // Note: Size includes alignment padding, so 4964 // struct x { short a; char b; } 4965 // will have Size = 4. With #pragma pack(1), it will have Size = 3. 4966 // These are the proper values we need for right-justifying the 4967 // aggregate in a parameter register. 4968 unsigned Size = Flags.getByValSize(); 4969 4970 // An empty aggregate parameter takes up no storage and no 4971 // registers. 4972 if (Size == 0) 4973 continue; 4974 4975 if (CallConv == CallingConv::Fast) 4976 ComputePtrOff(); 4977 4978 // All aggregates smaller than 8 bytes must be passed right-justified. 4979 if (Size==1 || Size==2 || Size==4) { 4980 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); 4981 if (GPR_idx != NumGPRs) { 4982 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 4983 MachinePointerInfo(), VT, 4984 false, false, false, 0); 4985 MemOpChains.push_back(Load.getValue(1)); 4986 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 4987 4988 ArgOffset += PtrByteSize; 4989 continue; 4990 } 4991 } 4992 4993 if (GPR_idx == NumGPRs && Size < 8) { 4994 SDValue AddPtr = PtrOff; 4995 if (!isLittleEndian) { 4996 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 4997 PtrOff.getValueType()); 4998 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 4999 } 5000 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5001 CallSeqStart, 5002 Flags, DAG, dl); 5003 ArgOffset += PtrByteSize; 5004 continue; 5005 } 5006 // Copy entire object into memory. There are cases where gcc-generated 5007 // code assumes it is there, even if it could be put entirely into 5008 // registers. (This is not what the doc says.) 5009 5010 // FIXME: The above statement is likely due to a misunderstanding of the 5011 // documents. All arguments must be copied into the parameter area BY 5012 // THE CALLEE in the event that the callee takes the address of any 5013 // formal argument. That has not yet been implemented. However, it is 5014 // reasonable to use the stack area as a staging area for the register 5015 // load. 5016 5017 // Skip this for small aggregates, as we will use the same slot for a 5018 // right-justified copy, below. 5019 if (Size >= 8) 5020 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5021 CallSeqStart, 5022 Flags, DAG, dl); 5023 5024 // When a register is available, pass a small aggregate right-justified. 5025 if (Size < 8 && GPR_idx != NumGPRs) { 5026 // The easiest way to get this right-justified in a register 5027 // is to copy the structure into the rightmost portion of a 5028 // local variable slot, then load the whole slot into the 5029 // register. 5030 // FIXME: The memcpy seems to produce pretty awful code for 5031 // small aggregates, particularly for packed ones. 5032 // FIXME: It would be preferable to use the slot in the 5033 // parameter save area instead of a new local variable. 5034 SDValue AddPtr = PtrOff; 5035 if (!isLittleEndian) { 5036 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 5037 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5038 } 5039 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5040 CallSeqStart, 5041 Flags, DAG, dl); 5042 5043 // Load the slot into the register. 5044 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, 5045 MachinePointerInfo(), 5046 false, false, false, 0); 5047 MemOpChains.push_back(Load.getValue(1)); 5048 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5049 5050 // Done with this argument. 5051 ArgOffset += PtrByteSize; 5052 continue; 5053 } 5054 5055 // For aggregates larger than PtrByteSize, copy the pieces of the 5056 // object that fit into registers from the parameter save area. 5057 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5058 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5059 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5060 if (GPR_idx != NumGPRs) { 5061 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 5062 MachinePointerInfo(), 5063 false, false, false, 0); 5064 MemOpChains.push_back(Load.getValue(1)); 5065 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5066 ArgOffset += PtrByteSize; 5067 } else { 5068 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5069 break; 5070 } 5071 } 5072 continue; 5073 } 5074 5075 switch (Arg.getSimpleValueType().SimpleTy) { 5076 default: llvm_unreachable("Unexpected ValueType for argument!"); 5077 case MVT::i1: 5078 case MVT::i32: 5079 case MVT::i64: 5080 if (Flags.isNest()) { 5081 // The 'nest' parameter, if any, is passed in R11. 5082 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 5083 hasNest = true; 5084 break; 5085 } 5086 5087 // These can be scalar arguments or elements of an integer array type 5088 // passed directly. Clang may use those instead of "byval" aggregate 5089 // types to avoid forcing arguments to memory unnecessarily. 5090 if (GPR_idx != NumGPRs) { 5091 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5092 } else { 5093 if (CallConv == CallingConv::Fast) 5094 ComputePtrOff(); 5095 5096 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5097 true, isTailCall, false, MemOpChains, 5098 TailCallArguments, dl); 5099 if (CallConv == CallingConv::Fast) 5100 ArgOffset += PtrByteSize; 5101 } 5102 if (CallConv != CallingConv::Fast) 5103 ArgOffset += PtrByteSize; 5104 break; 5105 case MVT::f32: 5106 case MVT::f64: { 5107 // These can be scalar arguments or elements of a float array type 5108 // passed directly. The latter are used to implement ELFv2 homogenous 5109 // float aggregates. 5110 5111 // Named arguments go into FPRs first, and once they overflow, the 5112 // remaining arguments go into GPRs and then the parameter save area. 5113 // Unnamed arguments for vararg functions always go to GPRs and 5114 // then the parameter save area. For now, put all arguments to vararg 5115 // routines always in both locations (FPR *and* GPR or stack slot). 5116 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs; 5117 bool NeededLoad = false; 5118 5119 // First load the argument into the next available FPR. 5120 if (FPR_idx != NumFPRs) 5121 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5122 5123 // Next, load the argument into GPR or stack slot if needed. 5124 if (!NeedGPROrStack) 5125 ; 5126 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) { 5127 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 5128 // once we support fp <-> gpr moves. 5129 5130 // In the non-vararg case, this can only ever happen in the 5131 // presence of f32 array types, since otherwise we never run 5132 // out of FPRs before running out of GPRs. 5133 SDValue ArgVal; 5134 5135 // Double values are always passed in a single GPR. 5136 if (Arg.getValueType() != MVT::f32) { 5137 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg); 5138 5139 // Non-array float values are extended and passed in a GPR. 5140 } else if (!Flags.isInConsecutiveRegs()) { 5141 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5142 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 5143 5144 // If we have an array of floats, we collect every odd element 5145 // together with its predecessor into one GPR. 5146 } else if (ArgOffset % PtrByteSize != 0) { 5147 SDValue Lo, Hi; 5148 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]); 5149 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5150 if (!isLittleEndian) 5151 std::swap(Lo, Hi); 5152 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 5153 5154 // The final element, if even, goes into the first half of a GPR. 5155 } else if (Flags.isInConsecutiveRegsLast()) { 5156 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg); 5157 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal); 5158 if (!isLittleEndian) 5159 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal, 5160 DAG.getConstant(32, dl, MVT::i32)); 5161 5162 // Non-final even elements are skipped; they will be handled 5163 // together the with subsequent argument on the next go-around. 5164 } else 5165 ArgVal = SDValue(); 5166 5167 if (ArgVal.getNode()) 5168 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal)); 5169 } else { 5170 if (CallConv == CallingConv::Fast) 5171 ComputePtrOff(); 5172 5173 // Single-precision floating-point values are mapped to the 5174 // second (rightmost) word of the stack doubleword. 5175 if (Arg.getValueType() == MVT::f32 && 5176 !isLittleEndian && !Flags.isInConsecutiveRegs()) { 5177 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 5178 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 5179 } 5180 5181 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5182 true, isTailCall, false, MemOpChains, 5183 TailCallArguments, dl); 5184 5185 NeededLoad = true; 5186 } 5187 // When passing an array of floats, the array occupies consecutive 5188 // space in the argument area; only round up to the next doubleword 5189 // at the end of the array. Otherwise, each float takes 8 bytes. 5190 if (CallConv != CallingConv::Fast || NeededLoad) { 5191 ArgOffset += (Arg.getValueType() == MVT::f32 && 5192 Flags.isInConsecutiveRegs()) ? 4 : 8; 5193 if (Flags.isInConsecutiveRegsLast()) 5194 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 5195 } 5196 break; 5197 } 5198 case MVT::v4f32: 5199 case MVT::v4i32: 5200 case MVT::v8i16: 5201 case MVT::v16i8: 5202 case MVT::v2f64: 5203 case MVT::v2i64: 5204 case MVT::v1i128: 5205 if (!Subtarget.hasQPX()) { 5206 // These can be scalar arguments or elements of a vector array type 5207 // passed directly. The latter are used to implement ELFv2 homogenous 5208 // vector aggregates. 5209 5210 // For a varargs call, named arguments go into VRs or on the stack as 5211 // usual; unnamed arguments always go to the stack or the corresponding 5212 // GPRs when within range. For now, we always put the value in both 5213 // locations (or even all three). 5214 if (isVarArg) { 5215 // We could elide this store in the case where the object fits 5216 // entirely in R registers. Maybe later. 5217 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5218 MachinePointerInfo(), false, false, 0); 5219 MemOpChains.push_back(Store); 5220 if (VR_idx != NumVRs) { 5221 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 5222 MachinePointerInfo(), 5223 false, false, false, 0); 5224 MemOpChains.push_back(Load.getValue(1)); 5225 5226 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5227 Arg.getSimpleValueType() == MVT::v2i64) ? 5228 VSRH[VR_idx] : VR[VR_idx]; 5229 ++VR_idx; 5230 5231 RegsToPass.push_back(std::make_pair(VReg, Load)); 5232 } 5233 ArgOffset += 16; 5234 for (unsigned i=0; i<16; i+=PtrByteSize) { 5235 if (GPR_idx == NumGPRs) 5236 break; 5237 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5238 DAG.getConstant(i, dl, PtrVT)); 5239 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5240 false, false, false, 0); 5241 MemOpChains.push_back(Load.getValue(1)); 5242 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5243 } 5244 break; 5245 } 5246 5247 // Non-varargs Altivec params go into VRs or on the stack. 5248 if (VR_idx != NumVRs) { 5249 unsigned VReg = (Arg.getSimpleValueType() == MVT::v2f64 || 5250 Arg.getSimpleValueType() == MVT::v2i64) ? 5251 VSRH[VR_idx] : VR[VR_idx]; 5252 ++VR_idx; 5253 5254 RegsToPass.push_back(std::make_pair(VReg, Arg)); 5255 } else { 5256 if (CallConv == CallingConv::Fast) 5257 ComputePtrOff(); 5258 5259 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5260 true, isTailCall, true, MemOpChains, 5261 TailCallArguments, dl); 5262 if (CallConv == CallingConv::Fast) 5263 ArgOffset += 16; 5264 } 5265 5266 if (CallConv != CallingConv::Fast) 5267 ArgOffset += 16; 5268 break; 5269 } // not QPX 5270 5271 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 5272 "Invalid QPX parameter type"); 5273 5274 /* fall through */ 5275 case MVT::v4f64: 5276 case MVT::v4i1: { 5277 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 5278 if (isVarArg) { 5279 // We could elide this store in the case where the object fits 5280 // entirely in R registers. Maybe later. 5281 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5282 MachinePointerInfo(), false, false, 0); 5283 MemOpChains.push_back(Store); 5284 if (QFPR_idx != NumQFPRs) { 5285 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, 5286 Store, PtrOff, MachinePointerInfo(), 5287 false, false, false, 0); 5288 MemOpChains.push_back(Load.getValue(1)); 5289 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 5290 } 5291 ArgOffset += (IsF32 ? 16 : 32); 5292 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 5293 if (GPR_idx == NumGPRs) 5294 break; 5295 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5296 DAG.getConstant(i, dl, PtrVT)); 5297 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5298 false, false, false, 0); 5299 MemOpChains.push_back(Load.getValue(1)); 5300 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5301 } 5302 break; 5303 } 5304 5305 // Non-varargs QPX params go into registers or on the stack. 5306 if (QFPR_idx != NumQFPRs) { 5307 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 5308 } else { 5309 if (CallConv == CallingConv::Fast) 5310 ComputePtrOff(); 5311 5312 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5313 true, isTailCall, true, MemOpChains, 5314 TailCallArguments, dl); 5315 if (CallConv == CallingConv::Fast) 5316 ArgOffset += (IsF32 ? 16 : 32); 5317 } 5318 5319 if (CallConv != CallingConv::Fast) 5320 ArgOffset += (IsF32 ? 16 : 32); 5321 break; 5322 } 5323 } 5324 } 5325 5326 assert(NumBytesActuallyUsed == ArgOffset); 5327 (void)NumBytesActuallyUsed; 5328 5329 if (!MemOpChains.empty()) 5330 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5331 5332 // Check if this is an indirect call (MTCTR/BCTRL). 5333 // See PrepareCall() for more information about calls through function 5334 // pointers in the 64-bit SVR4 ABI. 5335 if (!isTailCall && !IsPatchPoint && 5336 !isFunctionGlobalAddress(Callee) && 5337 !isa<ExternalSymbolSDNode>(Callee)) { 5338 // Load r2 into a virtual register and store it to the TOC save area. 5339 setUsesTOCBasePtr(DAG); 5340 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 5341 // TOC save area offset. 5342 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset(); 5343 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 5344 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5345 Chain = DAG.getStore( 5346 Val.getValue(1), dl, Val, AddPtr, 5347 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset), 5348 false, false, 0); 5349 // In the ELFv2 ABI, R12 must contain the address of an indirect callee. 5350 // This does not mean the MTCTR instruction must use R12; it's easier 5351 // to model this as an extra parameter, so do that. 5352 if (isELFv2ABI && !IsPatchPoint) 5353 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); 5354 } 5355 5356 // Build a sequence of copy-to-reg nodes chained together with token chain 5357 // and flag operands which copy the outgoing args into the appropriate regs. 5358 SDValue InFlag; 5359 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5360 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5361 RegsToPass[i].second, InFlag); 5362 InFlag = Chain.getValue(1); 5363 } 5364 5365 if (isTailCall) 5366 PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp, 5367 FPOp, true, TailCallArguments); 5368 5369 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, hasNest, 5370 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee, 5371 SPDiff, NumBytes, Ins, InVals, CS); 5372 } 5373 5374 SDValue 5375 PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, 5376 CallingConv::ID CallConv, bool isVarArg, 5377 bool isTailCall, bool IsPatchPoint, 5378 const SmallVectorImpl<ISD::OutputArg> &Outs, 5379 const SmallVectorImpl<SDValue> &OutVals, 5380 const SmallVectorImpl<ISD::InputArg> &Ins, 5381 SDLoc dl, SelectionDAG &DAG, 5382 SmallVectorImpl<SDValue> &InVals, 5383 ImmutableCallSite *CS) const { 5384 5385 unsigned NumOps = Outs.size(); 5386 5387 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 5388 bool isPPC64 = PtrVT == MVT::i64; 5389 unsigned PtrByteSize = isPPC64 ? 8 : 4; 5390 5391 MachineFunction &MF = DAG.getMachineFunction(); 5392 5393 // Mark this function as potentially containing a function that contains a 5394 // tail call. As a consequence the frame pointer will be used for dynamicalloc 5395 // and restoring the callers stack pointer in this functions epilog. This is 5396 // done because by tail calling the called function might overwrite the value 5397 // in this function's (MF) stack pointer stack slot 0(SP). 5398 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5399 CallConv == CallingConv::Fast) 5400 MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); 5401 5402 // Count how many bytes are to be pushed on the stack, including the linkage 5403 // area, and parameter passing area. We start with 24/48 bytes, which is 5404 // prereserved space for [SP][CR][LR][3 x unused]. 5405 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 5406 unsigned NumBytes = LinkageSize; 5407 5408 // Add up all the space actually used. 5409 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually 5410 // they all go in registers, but we must reserve stack space for them for 5411 // possible use by the caller. In varargs or 64-bit calls, parameters are 5412 // assigned stack space in order, with padding so Altivec parameters are 5413 // 16-byte aligned. 5414 unsigned nAltivecParamsAtEnd = 0; 5415 for (unsigned i = 0; i != NumOps; ++i) { 5416 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5417 EVT ArgVT = Outs[i].VT; 5418 // Varargs Altivec parameters are padded to a 16 byte boundary. 5419 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 5420 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 5421 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) { 5422 if (!isVarArg && !isPPC64) { 5423 // Non-varargs Altivec parameters go after all the non-Altivec 5424 // parameters; handle those later so we know how much padding we need. 5425 nAltivecParamsAtEnd++; 5426 continue; 5427 } 5428 // Varargs and 64-bit Altivec parameters are padded to 16 byte boundary. 5429 NumBytes = ((NumBytes+15)/16)*16; 5430 } 5431 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize); 5432 } 5433 5434 // Allow for Altivec parameters at the end, if needed. 5435 if (nAltivecParamsAtEnd) { 5436 NumBytes = ((NumBytes+15)/16)*16; 5437 NumBytes += 16*nAltivecParamsAtEnd; 5438 } 5439 5440 // The prolog code of the callee may store up to 8 GPR argument registers to 5441 // the stack, allowing va_start to index over them in memory if its varargs. 5442 // Because we cannot tell if this is needed on the caller side, we have to 5443 // conservatively assume that it is needed. As such, make sure we have at 5444 // least enough stack space for the caller to store the 8 GPRs. 5445 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize); 5446 5447 // Tail call needs the stack to be aligned. 5448 if (getTargetMachine().Options.GuaranteedTailCallOpt && 5449 CallConv == CallingConv::Fast) 5450 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes); 5451 5452 // Calculate by how many bytes the stack has to be adjusted in case of tail 5453 // call optimization. 5454 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); 5455 5456 // To protect arguments on the stack from being clobbered in a tail call, 5457 // force all the loads to happen before doing any other lowering. 5458 if (isTailCall) 5459 Chain = DAG.getStackArgumentTokenFactor(Chain); 5460 5461 // Adjust the stack pointer for the new arguments... 5462 // These operations are automatically eliminated by the prolog/epilog pass 5463 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, dl, true), 5464 dl); 5465 SDValue CallSeqStart = Chain; 5466 5467 // Load the return address and frame pointer so it can be move somewhere else 5468 // later. 5469 SDValue LROp, FPOp; 5470 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, 5471 dl); 5472 5473 // Set up a copy of the stack pointer for use loading and storing any 5474 // arguments that may not fit in the registers available for argument 5475 // passing. 5476 SDValue StackPtr; 5477 if (isPPC64) 5478 StackPtr = DAG.getRegister(PPC::X1, MVT::i64); 5479 else 5480 StackPtr = DAG.getRegister(PPC::R1, MVT::i32); 5481 5482 // Figure out which arguments are going to go in registers, and which in 5483 // memory. Also, if this is a vararg function, floating point operations 5484 // must be stored to our stack, and loaded into integer regs as well, if 5485 // any integer regs are available for argument passing. 5486 unsigned ArgOffset = LinkageSize; 5487 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 5488 5489 static const MCPhysReg GPR_32[] = { // 32-bit registers. 5490 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 5491 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 5492 }; 5493 static const MCPhysReg GPR_64[] = { // 64-bit registers. 5494 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 5495 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 5496 }; 5497 static const MCPhysReg VR[] = { 5498 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 5499 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 5500 }; 5501 const unsigned NumGPRs = array_lengthof(GPR_32); 5502 const unsigned NumFPRs = 13; 5503 const unsigned NumVRs = array_lengthof(VR); 5504 5505 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 5506 5507 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 5508 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 5509 5510 SmallVector<SDValue, 8> MemOpChains; 5511 for (unsigned i = 0; i != NumOps; ++i) { 5512 SDValue Arg = OutVals[i]; 5513 ISD::ArgFlagsTy Flags = Outs[i].Flags; 5514 5515 // PtrOff will be used to store the current argument to the stack if a 5516 // register cannot be found for it. 5517 SDValue PtrOff; 5518 5519 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 5520 5521 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 5522 5523 // On PPC64, promote integers to 64-bit values. 5524 if (isPPC64 && Arg.getValueType() == MVT::i32) { 5525 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 5526 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 5527 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 5528 } 5529 5530 // FIXME memcpy is used way more than necessary. Correctness first. 5531 // Note: "by value" is code for passing a structure by value, not 5532 // basic types. 5533 if (Flags.isByVal()) { 5534 unsigned Size = Flags.getByValSize(); 5535 // Very small objects are passed right-justified. Everything else is 5536 // passed left-justified. 5537 if (Size==1 || Size==2) { 5538 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 5539 if (GPR_idx != NumGPRs) { 5540 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 5541 MachinePointerInfo(), VT, 5542 false, false, false, 0); 5543 MemOpChains.push_back(Load.getValue(1)); 5544 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5545 5546 ArgOffset += PtrByteSize; 5547 } else { 5548 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 5549 PtrOff.getValueType()); 5550 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 5551 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 5552 CallSeqStart, 5553 Flags, DAG, dl); 5554 ArgOffset += PtrByteSize; 5555 } 5556 continue; 5557 } 5558 // Copy entire object into memory. There are cases where gcc-generated 5559 // code assumes it is there, even if it could be put entirely into 5560 // registers. (This is not what the doc says.) 5561 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 5562 CallSeqStart, 5563 Flags, DAG, dl); 5564 5565 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 5566 // copy the pieces of the object that fit into registers from the 5567 // parameter save area. 5568 for (unsigned j=0; j<Size; j+=PtrByteSize) { 5569 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 5570 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 5571 if (GPR_idx != NumGPRs) { 5572 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg, 5573 MachinePointerInfo(), 5574 false, false, false, 0); 5575 MemOpChains.push_back(Load.getValue(1)); 5576 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5577 ArgOffset += PtrByteSize; 5578 } else { 5579 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 5580 break; 5581 } 5582 } 5583 continue; 5584 } 5585 5586 switch (Arg.getSimpleValueType().SimpleTy) { 5587 default: llvm_unreachable("Unexpected ValueType for argument!"); 5588 case MVT::i1: 5589 case MVT::i32: 5590 case MVT::i64: 5591 if (GPR_idx != NumGPRs) { 5592 if (Arg.getValueType() == MVT::i1) 5593 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg); 5594 5595 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); 5596 } else { 5597 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5598 isPPC64, isTailCall, false, MemOpChains, 5599 TailCallArguments, dl); 5600 } 5601 ArgOffset += PtrByteSize; 5602 break; 5603 case MVT::f32: 5604 case MVT::f64: 5605 if (FPR_idx != NumFPRs) { 5606 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); 5607 5608 if (isVarArg) { 5609 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5610 MachinePointerInfo(), false, false, 0); 5611 MemOpChains.push_back(Store); 5612 5613 // Float varargs are always shadowed in available integer registers 5614 if (GPR_idx != NumGPRs) { 5615 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 5616 MachinePointerInfo(), false, false, 5617 false, 0); 5618 MemOpChains.push_back(Load.getValue(1)); 5619 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5620 } 5621 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ 5622 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType()); 5623 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour); 5624 SDValue Load = DAG.getLoad(PtrVT, dl, Store, PtrOff, 5625 MachinePointerInfo(), 5626 false, false, false, 0); 5627 MemOpChains.push_back(Load.getValue(1)); 5628 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5629 } 5630 } else { 5631 // If we have any FPRs remaining, we may also have GPRs remaining. 5632 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available 5633 // GPRs. 5634 if (GPR_idx != NumGPRs) 5635 ++GPR_idx; 5636 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && 5637 !isPPC64) // PPC64 has 64-bit GPR's obviously :) 5638 ++GPR_idx; 5639 } 5640 } else 5641 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5642 isPPC64, isTailCall, false, MemOpChains, 5643 TailCallArguments, dl); 5644 if (isPPC64) 5645 ArgOffset += 8; 5646 else 5647 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8; 5648 break; 5649 case MVT::v4f32: 5650 case MVT::v4i32: 5651 case MVT::v8i16: 5652 case MVT::v16i8: 5653 if (isVarArg) { 5654 // These go aligned on the stack, or in the corresponding R registers 5655 // when within range. The Darwin PPC ABI doc claims they also go in 5656 // V registers; in fact gcc does this only for arguments that are 5657 // prototyped, not for those that match the ... We do it for all 5658 // arguments, seems to work. 5659 while (ArgOffset % 16 !=0) { 5660 ArgOffset += PtrByteSize; 5661 if (GPR_idx != NumGPRs) 5662 GPR_idx++; 5663 } 5664 // We could elide this store in the case where the object fits 5665 // entirely in R registers. Maybe later. 5666 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, 5667 DAG.getConstant(ArgOffset, dl, PtrVT)); 5668 SDValue Store = DAG.getStore(Chain, dl, Arg, PtrOff, 5669 MachinePointerInfo(), false, false, 0); 5670 MemOpChains.push_back(Store); 5671 if (VR_idx != NumVRs) { 5672 SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, 5673 MachinePointerInfo(), 5674 false, false, false, 0); 5675 MemOpChains.push_back(Load.getValue(1)); 5676 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); 5677 } 5678 ArgOffset += 16; 5679 for (unsigned i=0; i<16; i+=PtrByteSize) { 5680 if (GPR_idx == NumGPRs) 5681 break; 5682 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 5683 DAG.getConstant(i, dl, PtrVT)); 5684 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo(), 5685 false, false, false, 0); 5686 MemOpChains.push_back(Load.getValue(1)); 5687 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 5688 } 5689 break; 5690 } 5691 5692 // Non-varargs Altivec params generally go in registers, but have 5693 // stack space allocated at the end. 5694 if (VR_idx != NumVRs) { 5695 // Doesn't have GPR space allocated. 5696 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 5697 } else if (nAltivecParamsAtEnd==0) { 5698 // We are emitting Altivec params in order. 5699 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5700 isPPC64, isTailCall, true, MemOpChains, 5701 TailCallArguments, dl); 5702 ArgOffset += 16; 5703 } 5704 break; 5705 } 5706 } 5707 // If all Altivec parameters fit in registers, as they usually do, 5708 // they get stack space following the non-Altivec parameters. We 5709 // don't track this here because nobody below needs it. 5710 // If there are more Altivec parameters than fit in registers emit 5711 // the stores here. 5712 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) { 5713 unsigned j = 0; 5714 // Offset is aligned; skip 1st 12 params which go in V registers. 5715 ArgOffset = ((ArgOffset+15)/16)*16; 5716 ArgOffset += 12*16; 5717 for (unsigned i = 0; i != NumOps; ++i) { 5718 SDValue Arg = OutVals[i]; 5719 EVT ArgType = Outs[i].VT; 5720 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || 5721 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { 5722 if (++j > NumVRs) { 5723 SDValue PtrOff; 5724 // We are emitting Altivec params in order. 5725 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 5726 isPPC64, isTailCall, true, MemOpChains, 5727 TailCallArguments, dl); 5728 ArgOffset += 16; 5729 } 5730 } 5731 } 5732 } 5733 5734 if (!MemOpChains.empty()) 5735 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 5736 5737 // On Darwin, R12 must contain the address of an indirect callee. This does 5738 // not mean the MTCTR instruction must use R12; it's easier to model this as 5739 // an extra parameter, so do that. 5740 if (!isTailCall && 5741 !isFunctionGlobalAddress(Callee) && 5742 !isa<ExternalSymbolSDNode>(Callee) && 5743 !isBLACompatibleAddress(Callee, DAG)) 5744 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 : 5745 PPC::R12), Callee)); 5746 5747 // Build a sequence of copy-to-reg nodes chained together with token chain 5748 // and flag operands which copy the outgoing args into the appropriate regs. 5749 SDValue InFlag; 5750 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 5751 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 5752 RegsToPass[i].second, InFlag); 5753 InFlag = Chain.getValue(1); 5754 } 5755 5756 if (isTailCall) 5757 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp, 5758 FPOp, true, TailCallArguments); 5759 5760 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint, 5761 /* unused except on PPC64 ELFv1 */ false, DAG, 5762 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff, 5763 NumBytes, Ins, InVals, CS); 5764 } 5765 5766 bool 5767 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 5768 MachineFunction &MF, bool isVarArg, 5769 const SmallVectorImpl<ISD::OutputArg> &Outs, 5770 LLVMContext &Context) const { 5771 SmallVector<CCValAssign, 16> RVLocs; 5772 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 5773 return CCInfo.CheckReturn(Outs, RetCC_PPC); 5774 } 5775 5776 SDValue 5777 PPCTargetLowering::LowerReturn(SDValue Chain, 5778 CallingConv::ID CallConv, bool isVarArg, 5779 const SmallVectorImpl<ISD::OutputArg> &Outs, 5780 const SmallVectorImpl<SDValue> &OutVals, 5781 SDLoc dl, SelectionDAG &DAG) const { 5782 5783 SmallVector<CCValAssign, 16> RVLocs; 5784 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 5785 *DAG.getContext()); 5786 CCInfo.AnalyzeReturn(Outs, RetCC_PPC); 5787 5788 SDValue Flag; 5789 SmallVector<SDValue, 4> RetOps(1, Chain); 5790 5791 // Copy the result values into the output registers. 5792 for (unsigned i = 0; i != RVLocs.size(); ++i) { 5793 CCValAssign &VA = RVLocs[i]; 5794 assert(VA.isRegLoc() && "Can only return in registers!"); 5795 5796 SDValue Arg = OutVals[i]; 5797 5798 switch (VA.getLocInfo()) { 5799 default: llvm_unreachable("Unknown loc info!"); 5800 case CCValAssign::Full: break; 5801 case CCValAssign::AExt: 5802 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 5803 break; 5804 case CCValAssign::ZExt: 5805 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 5806 break; 5807 case CCValAssign::SExt: 5808 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 5809 break; 5810 } 5811 5812 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 5813 Flag = Chain.getValue(1); 5814 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 5815 } 5816 5817 RetOps[0] = Chain; // Update chain. 5818 5819 // Add the flag if we have it. 5820 if (Flag.getNode()) 5821 RetOps.push_back(Flag); 5822 5823 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps); 5824 } 5825 5826 SDValue PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET( 5827 SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget) const { 5828 SDLoc dl(Op); 5829 5830 // Get the corect type for integers. 5831 EVT IntVT = Op.getValueType(); 5832 5833 // Get the inputs. 5834 SDValue Chain = Op.getOperand(0); 5835 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 5836 // Build a DYNAREAOFFSET node. 5837 SDValue Ops[2] = {Chain, FPSIdx}; 5838 SDVTList VTs = DAG.getVTList(IntVT); 5839 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops); 5840 } 5841 5842 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, 5843 const PPCSubtarget &Subtarget) const { 5844 // When we pop the dynamic allocation we need to restore the SP link. 5845 SDLoc dl(Op); 5846 5847 // Get the corect type for pointers. 5848 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 5849 5850 // Construct the stack pointer operand. 5851 bool isPPC64 = Subtarget.isPPC64(); 5852 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1; 5853 SDValue StackPtr = DAG.getRegister(SP, PtrVT); 5854 5855 // Get the operands for the STACKRESTORE. 5856 SDValue Chain = Op.getOperand(0); 5857 SDValue SaveSP = Op.getOperand(1); 5858 5859 // Load the old link SP. 5860 SDValue LoadLinkSP = DAG.getLoad(PtrVT, dl, Chain, StackPtr, 5861 MachinePointerInfo(), 5862 false, false, false, 0); 5863 5864 // Restore the stack pointer. 5865 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP); 5866 5867 // Store the old link SP. 5868 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo(), 5869 false, false, 0); 5870 } 5871 5872 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const { 5873 MachineFunction &MF = DAG.getMachineFunction(); 5874 bool isPPC64 = Subtarget.isPPC64(); 5875 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 5876 5877 // Get current frame pointer save index. The users of this index will be 5878 // primarily DYNALLOC instructions. 5879 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 5880 int RASI = FI->getReturnAddrSaveIndex(); 5881 5882 // If the frame pointer save index hasn't been defined yet. 5883 if (!RASI) { 5884 // Find out what the fix offset of the frame pointer save area. 5885 int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset(); 5886 // Allocate the frame index for frame pointer save area. 5887 RASI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, LROffset, false); 5888 // Save the result. 5889 FI->setReturnAddrSaveIndex(RASI); 5890 } 5891 return DAG.getFrameIndex(RASI, PtrVT); 5892 } 5893 5894 SDValue 5895 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const { 5896 MachineFunction &MF = DAG.getMachineFunction(); 5897 bool isPPC64 = Subtarget.isPPC64(); 5898 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(MF.getDataLayout()); 5899 5900 // Get current frame pointer save index. The users of this index will be 5901 // primarily DYNALLOC instructions. 5902 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>(); 5903 int FPSI = FI->getFramePointerSaveIndex(); 5904 5905 // If the frame pointer save index hasn't been defined yet. 5906 if (!FPSI) { 5907 // Find out what the fix offset of the frame pointer save area. 5908 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset(); 5909 // Allocate the frame index for frame pointer save area. 5910 FPSI = MF.getFrameInfo()->CreateFixedObject(isPPC64? 8 : 4, FPOffset, true); 5911 // Save the result. 5912 FI->setFramePointerSaveIndex(FPSI); 5913 } 5914 return DAG.getFrameIndex(FPSI, PtrVT); 5915 } 5916 5917 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 5918 SelectionDAG &DAG, 5919 const PPCSubtarget &Subtarget) const { 5920 // Get the inputs. 5921 SDValue Chain = Op.getOperand(0); 5922 SDValue Size = Op.getOperand(1); 5923 SDLoc dl(Op); 5924 5925 // Get the corect type for pointers. 5926 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); 5927 // Negate the size. 5928 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT, 5929 DAG.getConstant(0, dl, PtrVT), Size); 5930 // Construct a node for the frame pointer save index. 5931 SDValue FPSIdx = getFramePointerFrameIndex(DAG); 5932 // Build a DYNALLOC node. 5933 SDValue Ops[3] = { Chain, NegSize, FPSIdx }; 5934 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); 5935 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops); 5936 } 5937 5938 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, 5939 SelectionDAG &DAG) const { 5940 SDLoc DL(Op); 5941 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL, 5942 DAG.getVTList(MVT::i32, MVT::Other), 5943 Op.getOperand(0), Op.getOperand(1)); 5944 } 5945 5946 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, 5947 SelectionDAG &DAG) const { 5948 SDLoc DL(Op); 5949 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other, 5950 Op.getOperand(0), Op.getOperand(1)); 5951 } 5952 5953 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { 5954 if (Op.getValueType().isVector()) 5955 return LowerVectorLoad(Op, DAG); 5956 5957 assert(Op.getValueType() == MVT::i1 && 5958 "Custom lowering only for i1 loads"); 5959 5960 // First, load 8 bits into 32 bits, then truncate to 1 bit. 5961 5962 SDLoc dl(Op); 5963 LoadSDNode *LD = cast<LoadSDNode>(Op); 5964 5965 SDValue Chain = LD->getChain(); 5966 SDValue BasePtr = LD->getBasePtr(); 5967 MachineMemOperand *MMO = LD->getMemOperand(); 5968 5969 SDValue NewLD = 5970 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 5971 BasePtr, MVT::i8, MMO); 5972 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 5973 5974 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 5975 return DAG.getMergeValues(Ops, dl); 5976 } 5977 5978 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 5979 if (Op.getOperand(1).getValueType().isVector()) 5980 return LowerVectorStore(Op, DAG); 5981 5982 assert(Op.getOperand(1).getValueType() == MVT::i1 && 5983 "Custom lowering only for i1 stores"); 5984 5985 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 5986 5987 SDLoc dl(Op); 5988 StoreSDNode *ST = cast<StoreSDNode>(Op); 5989 5990 SDValue Chain = ST->getChain(); 5991 SDValue BasePtr = ST->getBasePtr(); 5992 SDValue Value = ST->getValue(); 5993 MachineMemOperand *MMO = ST->getMemOperand(); 5994 5995 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 5996 Value); 5997 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 5998 } 5999 6000 // FIXME: Remove this once the ANDI glue bug is fixed: 6001 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 6002 assert(Op.getValueType() == MVT::i1 && 6003 "Custom lowering only for i1 results"); 6004 6005 SDLoc DL(Op); 6006 return DAG.getNode(PPCISD::ANDIo_1_GT_BIT, DL, MVT::i1, 6007 Op.getOperand(0)); 6008 } 6009 6010 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when 6011 /// possible. 6012 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 6013 // Not FP? Not a fsel. 6014 if (!Op.getOperand(0).getValueType().isFloatingPoint() || 6015 !Op.getOperand(2).getValueType().isFloatingPoint()) 6016 return Op; 6017 6018 // We might be able to do better than this under some circumstances, but in 6019 // general, fsel-based lowering of select is a finite-math-only optimization. 6020 // For more information, see section F.3 of the 2.06 ISA specification.