//=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for AArch64 architecture.
//
//===----------------------------------------------------------------------===//

/// CCIfAlign - Apply the action A only if the argument's original (pre-ABI)
/// alignment equals Align. Align is spliced textually into the generated C++
/// condition, so it must be a decimal byte count (e.g. "16").
class CCIfAlign<string Align, CCAction A> :
  CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;
/// CCIfBigEndian - Apply the action A only when compiling for a big-endian
/// target (queried from the MachineFunction's DataLayout at CC-lowering time).
class CCIfBigEndian<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().isBigEndian()", A>;

//===----------------------------------------------------------------------===//
// ARM AAPCS64 Calling Convention
//===----------------------------------------------------------------------===//

// The standard AAPCS64 argument-passing rules. Entries are tried strictly in
// order; the first one that matches assigns the argument. In each
// CCAssignToRegWithShadow, the second list names the aliasing registers that
// are marked as used alongside the allocated one (e.g. allocating W0 also
// consumes X0), keeping the GPR and FPR sequences consistent across widths.
def CC_AArch64_AAPCS : CallingConv<[
  // Pass FP vectors through their integer equivalents so one set of register
  // rules below covers both.
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // The 'nest' parameter, if any, is passed in X18.
  // Darwin uses X18 as the platform register and hence 'nest' isn't currently
  // supported there.
  CCIfNest<CCAssignToReg<[X18]>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToRegWithShadow<[X20], [W20]>>>,

  // Arguments flagged as part of a consecutive-register block (e.g. HFAs/HVAs)
  // are handled by custom C++ code.
  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // i128 is split to two i64s, we can't fit half to register X7.
  // Each split half starts at an even register (X0/X2/X4/X6), and the shadow
  // list consumes the intervening odd register so the pair stays adjacent.
  CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
                                                    [X0, X1, X3, X5]>>>,

  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  // X7 is shadowed so a lone remaining GPR is not used for half an i128.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  // FP scalars of every width share the same eight Q-register slots, so the
  // H/S/D lists all shadow Q0-Q7.
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  // Every AAPCS64 stack slot is at least 8 bytes / 8-byte aligned, even for
  // sub-word types (contrast with the Darwin variant below).
  CCIfType<[i1, i8, i16, f16], CCAssignToStack<8, 8>>,
  CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;

// AAPCS64 return-value rules: mirrors the argument rules for the register
// cases (no stack entries — returns that don't fit in registers are handled
// via sret), plus the swifterror result which comes back in X19.
def RetCC_AArch64_AAPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // A SwiftError is returned in X19.
  CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X19], [W19]>>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;


// Darwin uses a calling convention which differs in only two ways
// from the standard one at this level:
//   + i128s (i.e. split i64s) don't need even registers.
//   + Stack slots are sized as needed rather than being at least 64-bit.
def CC_AArch64_DarwinPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  // Note: unlike AAPCS above, f128 is also bitconverted here.
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToRegWithShadow<[X20], [W20]>>>,

  // A SwiftError is passed in X19.
  CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X19], [W19]>>>,

  // Consecutive-register blocks (e.g. HFAs/HVAs) go through custom C++ code.
  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // i128 is split to two i64s, we can't fit half to register X7.
  // Darwin does not require the even-register start that AAPCS does, so the
  // halves may begin at any of X0-X6; only X7 is excluded.
  CCIfType<[i64],
           CCIfSplit<CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6],
                                             [W0, W1, W2, W3, W4, W5, W6]>>>,
  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  // Darwin packs stack arguments to their natural size/alignment (1/2/4/8/16
  // bytes) rather than rounding everything up to 8 as AAPCS does. The i1/i8
  // and i16/f16 cases match on ValVT because the promotions above have
  // already widened the locked-in LocVT.
  CCIf<"ValVT == MVT::i1 || ValVT == MVT::i8", CCAssignToStack<1, 1>>,
  CCIf<"ValVT == MVT::i16 || ValVT == MVT::f16", CCAssignToStack<2, 2>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;

// Darwin variadic arguments: everything after the fixed parameters goes on
// the stack, with scalars widened to 64 bits first so va_arg slots have a
// uniform size.
def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // Consecutive-register blocks are forced onto the stack via custom code.
  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Stack_Block">>,

  // Handle all scalar types as either i64 or f64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  CCIfType<[f16, f32], CCPromoteToType<f64>>,

  // Everything is on the stack.
  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;

// The WebKit_JS calling convention only passes the first argument (the callee)
// in register and the remaining arguments on stack. We allow 32bit stack slots,
// so that WebKit can write partial values in the stack and define the other
// 32bit quantity as undef.
def CC_AArch64_WebKit_JS : CallingConv<[
  // Handle i1, i8, i16, i32, and i64 passing in register X0 (W0).
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0], [X0]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0], [W0]>>,

  // Pass the remaining arguments on the stack instead.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;

// WebKit_JS return values use the same eight GPR/FPR slots as AAPCS, but only
// for the scalar types the convention supports.
def RetCC_AArch64_WebKit_JS : CallingConv<[
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;

//===----------------------------------------------------------------------===//
// ARM64 Calling Convention for GHC
//===----------------------------------------------------------------------===//

// This calling convention is specific to the Glasgow Haskell Compiler.
// The only documentation is the GHC source code, specifically the C header
// file:
//
//     https://github.com/ghc/ghc/blob/master/includes/stg/MachRegs.h
//
// which defines the registers for the Spineless Tagless G-Machine (STG) that
// GHC uses to implement lazy evaluation. The generic STG machine has a set of
// registers which are mapped to appropriate set of architecture specific
// registers for each CPU architecture.
//
// The STG Machine is documented here:
//
//    https://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/GeneratedCode
//
// The AArch64 register mapping is under the heading "The ARMv8/AArch64 ABI
// register mapping".

def CC_AArch64_GHC : CallingConv<[
  // Handle all vector types as either f64 or v2f64.
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, f128], CCBitConvertToType<v2f64>>,

  CCIfType<[v2f64], CCAssignToReg<[Q4, Q5]>>,
  CCIfType<[f32], CCAssignToReg<[S8, S9, S10, S11]>>,
  CCIfType<[f64], CCAssignToReg<[D12, D13, D14, D15]>>,

  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64], CCAssignToReg<[X19, X20, X21, X22, X23, X24, X25, X26, X27, X28]>>
]>;

// FIXME: LR is only callee-saved in the sense that *we* preserve it and are
// presumably a callee to someone. External functions may not do so, but this
// is currently safe since BL has LR as an implicit-def and what happens after a
// tail call doesn't matter.
//
// It would be better to model its preservation semantics properly (create a
// vreg on entry, use it in RET & tail call generation; make that vreg def if we
// end up saving LR as part of a call frame). Watch this space...
// Standard AAPCS64 callee-saved set: X19-X28 plus FP (X29) and LR (X30) on
// the GPR side, and the low 64 bits of v8-v15 (D8-D15) on the FP side.
def CSR_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
                                           X23, X24, X25, X26, X27, X28,
                                           D8,  D9,  D10, D11,
                                           D12, D13, D14, D15)>;

// Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
// 'this' and the pointer return value are both passed in X0 in these cases,
// this can be partially modelled by treating X0 as a callee-saved register;
// only the resulting RegMask is used; the SaveList is ignored
//
// (For generic ARM 64-bit ABI code, clang will not generate constructors or
// destructors with 'this' returns, so this RegMask will not be used in that
// case)
def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;

// Functions taking/returning a swifterror drop X19 from the callee-saved set,
// since X19 carries the error value across the call.
def CSR_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X19)>;

// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. x16 and x17 are used on the
// fast path for calculation, but other registers except X0 (argument/return)
// and LR (it is a call, after all) are preserved.
def CSR_AArch64_TLS_Darwin
    : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
                           FP,
                           (sequence "Q%u", 0, 31))>;

// We can only handle a register pair with adjacent registers, the register pair
// should belong to the same class as well. Since the access function on the
// fast path calls a function that follows CSR_AArch64_TLS_Darwin,
// CSR_AArch64_CXX_TLS_Darwin should be a subset of CSR_AArch64_TLS_Darwin.
// (X15-X18 are also excluded so saved GPRs pair up cleanly.)
def CSR_AArch64_CXX_TLS_Darwin
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS,
                           (sub (sequence "X%u", 1, 28), X15, X16, X17, X18),
                           (sequence "D%u", 0, 31))>;

// CSRs that are handled by prologue, epilogue.
def CSR_AArch64_CXX_TLS_Darwin_PE
    : CalleeSavedRegs<(add LR, FP)>;

// CSRs that are handled explicitly via copies.
def CSR_AArch64_CXX_TLS_Darwin_ViaCopy
    : CalleeSavedRegs<(sub CSR_AArch64_CXX_TLS_Darwin, LR, FP)>;

// The ELF stub used for TLS-descriptor access saves every feasible
// register. Only X0 and LR are clobbered.
def CSR_AArch64_TLS_ELF
    : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
                           (sequence "Q%u", 0, 31))>;

// Every register in every width/class, for conventions that must preserve the
// entire machine state (the W/B/H/S/D entries alias the X/Q registers but are
// listed so each register unit is covered).
def CSR_AArch64_AllRegs
    : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
                           (sequence "X%u", 0, 28), FP, LR, SP,
                           (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
                           (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
                           (sequence "Q%u", 0, 31))>;

// No callee-saved registers at all (caller must assume everything clobbered
// is fine / callee preserves nothing).
def CSR_AArch64_NoRegs : CalleeSavedRegs<(add)>;

// preserve_mostcc: the AAPCS set plus the temporaries X9-X15. (X8, X16-X18
// and the vector temporaries remain caller-saved.)
def CSR_AArch64_RT_MostRegs : CalleeSavedRegs<(add CSR_AArch64_AAPCS,
                                                   (sequence "X%u", 9, 15))>;