//===- PPCCallingConv.td - Calling Conventions for PowerPC -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the PowerPC 32- and 64-bit
// architectures.
//
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
/// F is the name of a PPCSubtarget predicate (e.g. "isPPC64()") pasted
/// verbatim into the generated C++ condition, so it must be valid C++.
class CCIfSubtarget<string F, CCAction A>
 : CCIf<!strconcat("static_cast<const PPCSubtarget&>"
                   "(State.getMachineFunction().getSubtarget()).",
                   F),
        A>;

/// CCIfNotSubtarget - Match if the current subtarget does NOT have feature F.
/// Same contract as CCIfSubtarget, with the predicate negated.
class CCIfNotSubtarget<string F, CCAction A>
 : CCIf<!strconcat("!static_cast<const PPCSubtarget&>"
                   "(State.getMachineFunction().getSubtarget()).",
                   F),
        A>;

/// CCIfOrigArgWasNotPPCF128 - Apply A only when the pre-legalization argument
/// was not a ppcf128. Requires the CCState to actually be a PPCCCState, which
/// records that per-argument (per-ValNo) information.
class CCIfOrigArgWasNotPPCF128<CCAction A>
 : CCIf<"!static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
        A>;

//===----------------------------------------------------------------------===//
// Return Value Calling Convention
//===----------------------------------------------------------------------===//

// PPC64 AnyReg return-value convention. No explicit register is specified for
// the return-value. The register allocator is allowed and expected to choose
// any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the PPC C calling convention.
def RetCC_PPC64_AnyReg : CallingConv<[
  // AnyReg is only valid for stackmap/patchpoint; any other use is an error.
  CCCustom<"CC_PPC_AnyReg_Error">
]>;

// Return-value convention for PowerPC
// NOTE: entry order matters — earlier entries take precedence.
def RetCC_PPC : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  // On PPC64, integer return values are always promoted to i64
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  // On 32-bit targets only i1 needs widening; i32 is already register-sized.
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,

  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
  // i128 draws from the same GPR list as i64 (presumably split into 64-bit
  // halves by lowering — TODO confirm against the C++ lowering code).
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,

  // Floating point types returned as "direct" go into F1 .. F8; note that
  // only the ELFv2 ABI fully utilizes all these registers.
  CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,

  // QPX vectors are returned in QF1 and QF2.
  CCIfType<[v4f64, v4f32, v4i1],
           CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1, QF2]>>>,

  // Vector types returned as "direct" go into V2 .. V9; note that only the
  // ELFv2 ABI fully utilizes all these registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
  // With VSX, v2f64/v2i64 are returned in VSH2 .. VSH9.
  CCIfType<[v2f64, v2i64], CCIfSubtarget<"hasVSX()",
           CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9]>>>
]>;

// No explicit register is specified for the AnyReg calling convention. The
// register allocator may assign the arguments to any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the PPC C calling convention.
def CC_PPC64_AnyReg : CallingConv<[
  // AnyReg arguments are only meaningful to stackmap/patchpoint lowering;
  // reaching this table from anywhere else produces a diagnostic.
  CCCustom<"CC_PPC_AnyReg_Error">
]>;

// There is no complete TableGen calling convention for 64-bit PowerPC yet;
// the ABI's complexities are handled in the C++ lowering logic instead.
// FIXME: See if that logic can be simplified with use of CCs; this may
// require some extensions to current table generation.

// Minimal argument convention for 64-bit ELF PowerPC fast isel: integers
// (each widened to i64) and scalar floats only. Vector types and quadword
// integers are deliberately left unhandled here.
def CC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_PPC64_AnyReg>>,

  // Widen every narrow integer type to a full 64-bit value.
  CCIfType<[i1, i8, i16, i32], CCPromoteToType<i64>>,

  // Up to eight integer arguments in GPRs, up to eight FP arguments in FPRs.
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6, X7, X8, X9, X10]>>,
  CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>
]>;

// Simple return-value convention for 64-bit ELF PowerPC fast isel.
// All small ints are promoted to i64. Vector types, quadword ints,
// and multiple register returns are "supported" to avoid compile
// errors, but none are handled by the fast selector.
def RetCC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  // All narrow integers are widened to i64 before assignment.
  CCIfType<[i1], CCPromoteToType<i64>>,
  CCIfType<[i8], CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  // Vector returns mirror RetCC_PPC; fast isel itself does not handle them.
  CCIfType<[v4f64, v4f32, v4i1],
           CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1, QF2]>>>,
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
  CCIfType<[v2f64, v2i64], CCIfSubtarget<"hasVSX()",
           CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9]>>>
]>;

//===----------------------------------------------------------------------===//
// PowerPC System V Release 4 32-bit ABI
//===----------------------------------------------------------------------===//

// Shared tail of the 32-bit SVR4 argument conventions. NOTE: entry order is
// significant — the alignment customs must run before register assignment.
def CC_PPC32_SVR4_Common : CallingConv<[
  CCIfType<[i1], CCPromoteToType<i32>>,

  // The ABI requires i64 to be passed in two adjacent registers with the first
  // register having an odd register number.
  // Soft-float case: skip the alignment fixup for i32 pieces that came from a
  // ppcf128 (tracked per-ValNo by PPCCCState).
  CCIfType<[i32],
           CCIfSplit<CCIfSubtarget<"useSoftFloat()",
                     CCIfOrigArgWasNotPPCF128<
                     CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>>,

  // Hard-float case: always align split i32 pieces to an odd GPR.
  CCIfType<[i32],
           CCIfSplit<CCIfNotSubtarget<"useSoftFloat()",
                     CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>,

  // The 'nest' parameter, if any, is passed in R11.
  CCIfNest<CCAssignToReg<[R11]>>,

  // The first 8 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,

  // Make sure the i64 words from a long double are either both passed in
  // registers or both passed on the stack.
  CCIfType<[f64], CCIfSplit<CCCustom<"CC_PPC32_SVR4_Custom_AlignFPArgRegs">>>,

  // FP values are passed in F1 - F8.
  CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,

  // Split arguments have an alignment of 8 bytes on the stack.
  CCIfType<[i32], CCIfSplit<CCAssignToStack<4, 8>>>,

  CCIfType<[i32], CCAssignToStack<4, 4>>,

  // Floats are stored in double precision format, thus they have the same
  // alignment and size as doubles.
  CCIfType<[f32,f64], CCAssignToStack<8, 8>>,

  // QPX vectors that are stored in double precision need 32-byte alignment.
  CCIfType<[v4f64, v4i1], CCAssignToStack<32, 32>>,

  // Vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>
]>;

// This calling convention puts vector arguments always on the stack. It is used
// to assign vector arguments which belong to the variable portion of the
// parameter list of a variable argument function.
// (It has no vector-register entries of its own, so vectors fall through to
// the stack-assignment rules in CC_PPC32_SVR4_Common.)
def CC_PPC32_SVR4_VarArg : CallingConv<[
  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;

// In contrast to CC_PPC32_SVR4_VarArg, this calling convention first tries to
// put vector arguments in vector registers before putting them on the stack.
def CC_PPC32_SVR4 : CallingConv<[
  // QPX vectors mirror the scalar FP convention.
  CCIfType<[v4f64, v4f32, v4i1], CCIfSubtarget<"hasQPX()",
           CCAssignToReg<[QF1, QF2, QF3, QF4, QF5, QF6, QF7, QF8]>>>,

  // The first 12 Vector arguments are passed in AltiVec registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32],
           CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                                        V8, V9, V10, V11, V12,
                                                        V13]>>>,
  // With VSX, v2f64/v2i64 use the corresponding VSH registers.
  CCIfType<[v2f64, v2i64], CCIfSubtarget<"hasVSX()",
           CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9,
                          VSH10, VSH11, VSH12, VSH13]>>>,

  // Everything else (and vectors that ran out of registers) falls through.
  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;

// Helper "calling convention" to handle aggregate by value arguments.
// Aggregate by value arguments are always placed in the local variable space
// of the caller. This calling convention is only used to assign those stack
// offsets in the callers stack frame.
//
// Still, the address of the aggregate copy in the callers stack frame is passed
// in a GPR (or in the parameter list area if all GPRs are allocated) from the
// caller to the callee. The location for the address argument is assigned by
// the CC_PPC32_SVR4 calling convention.
//
// The only purpose of CC_PPC32_SVR4_Custom_Dummy is to skip arguments which are
// not passed by value.

// See the explanatory comment above: byval aggregates get 4-byte-aligned
// caller-frame slots; everything else is skipped by the dummy custom handler.
def CC_PPC32_SVR4_ByVal : CallingConv<[
  CCIfByVal<CCPassByVal<4, 4>>,

  CCCustom<"CC_PPC32_SVR4_Custom_Dummy">
]>;

// Callee-saved Altivec registers, shared by the ABI-specific sets below.
def CSR_Altivec : CalleeSavedRegs<(add V20, V21, V22, V23, V24, V25, V26, V27,
                                       V28, V29, V30, V31)>;

// Darwin 32-bit: R13-R31, F14-F31, and the nonvolatile CR fields CR2-CR4.
def CSR_Darwin32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
                                        R21, R22, R23, R24, R25, R26, R27, R28,
                                        R29, R30, R31, F14, F15, F16, F17, F18,
                                        F19, F20, F21, F22, F23, F24, F25, F26,
                                        F27, F28, F29, F30, F31, CR2, CR3, CR4
                                   )>;

def CSR_Darwin32_Altivec : CalleeSavedRegs<(add CSR_Darwin32, CSR_Altivec)>;

// SVR4 32-bit: like Darwin32 but R13 is not callee-saved here.
def CSR_SVR432 : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20,
                                      R21, R22, R23, R24, R25, R26, R27, R28,
                                      R29, R30, R31, F14, F15, F16, F17, F18,
                                      F19, F20, F21, F22, F23, F24, F25, F26,
                                      F27, F28, F29, F30, F31, CR2, CR3, CR4
                                 )>;

def CSR_SVR432_Altivec : CalleeSavedRegs<(add CSR_SVR432, CSR_Altivec)>;

// Darwin 64-bit: same shape as Darwin32 with 64-bit GPRs (X13-X31).
def CSR_Darwin64 : CalleeSavedRegs<(add X13, X14, X15, X16, X17, X18, X19, X20,
                                        X21, X22, X23, X24, X25, X26, X27, X28,
                                        X29, X30, X31, F14, F15, F16, F17, F18,
                                        F19, F20, F21, F22, F23, F24, F25, F26,
                                        F27, F28, F29, F30, F31, CR2, CR3, CR4
                                   )>;

def CSR_Darwin64_Altivec : CalleeSavedRegs<(add CSR_Darwin64, CSR_Altivec)>;

// SVR4/ELF 64-bit: X14-X31, F14-F31, CR2-CR4 (X13 excluded, as in SVR432).
def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
                                      X21, X22, X23, X24, X25, X26, X27, X28,
                                      X29, X30, X31, F14, F15, F16, F17, F18,
                                      F19, F20, F21, F22, F23, F24, F25, F26,
                                      F27, F28, F29, F30, F31, CR2, CR3, CR4
                                 )>;

// CSRs that are handled by prologue, epilogue.
// Empty set: the prologue/epilogue code saves everything itself.
// NOTE(review): "SRV464" looks like a transposition of "SVR464". The name
// feeds the generated CSR_SRV464_TLS_PE_SaveList/RegMask symbols used from
// C++, so renaming must be coordinated with those callers — confirm before
// fixing.
def CSR_SRV464_TLS_PE : CalleeSavedRegs<(add)>;

// "_ViaCopy" variants contain the same registers as their base sets; they
// exist as distinct names so C++ code can select a different save strategy.
// (Exact semantics live in the frame-lowering code — TODO confirm there.)
def CSR_SVR464_ViaCopy : CalleeSavedRegs<(add CSR_SVR464)>;

def CSR_SVR464_Altivec : CalleeSavedRegs<(add CSR_SVR464, CSR_Altivec)>;

def CSR_SVR464_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_Altivec)>;

// "_R2" variants additionally preserve X2 (the TOC pointer register).
def CSR_SVR464_R2 : CalleeSavedRegs<(add CSR_SVR464, X2)>;

def CSR_SVR464_R2_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2)>;

def CSR_SVR464_R2_Altivec : CalleeSavedRegs<(add CSR_SVR464_Altivec, X2)>;

def CSR_SVR464_R2_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2_Altivec)>;

// No callee-saved registers at all (e.g. for special calling conventions).
def CSR_NoRegs : CalleeSavedRegs<(add)>;

// "AllRegs" sets treat (nearly) every allocatable register as callee-saved;
// used by conventions such as AnyReg that must preserve caller state.
def CSR_64_AllRegs: CalleeSavedRegs<(add X0, (sequence "X%u", 3, 10),
                                    (sequence "X%u", 14, 31),
                                    (sequence "F%u", 0, 31),
                                    (sequence "CR%u", 0, 7))>;

def CSR_64_AllRegs_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
                                             (sequence "V%u", 0, 31))>;

def CSR_64_AllRegs_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
                                         (sequence "VSL%u", 0, 31),
                                         (sequence "VSH%u", 0, 31))>;
