//=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the AArch64 architecture.
//
//===----------------------------------------------------------------------===//

/// CCIfAlign - Match if the original alignment of the argument equals Align.
class CCIfAlign<string Align, CCAction A> :
  CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;
/// CCIfBigEndian - Match only if we're in big-endian mode.
class CCIfBigEndian<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().isBigEndian()", A>;
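
// Illustrative usage only (a sketch, not a rule taken from this file): a
// clause such as CCIfAlign<"16", CCAssignToStack<16, 16>> would apply its
// action only to arguments whose original alignment is 16 bytes, and
// CCIfBigEndian<CCBitConvertToType<f64>> would apply its action only when the
// target data layout is big-endian.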

//===----------------------------------------------------------------------===//
// ARM AAPCS64 Calling Convention
//===----------------------------------------------------------------------===//

def CC_AArch64_AAPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,
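
  // Informal sketch of the effect of the two big-endian rules above: a v4i16
  // argument travels as an opaque f64 bit pattern, and a v8i16 as an f128 one,
  // so caller and callee agree on which bits hold which lane without relying
  // on how per-element lane numbering interacts with endianness.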

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. The minimum size and alignment
  // of a slot are 64 bits.
  CCIfByVal<CCPassByVal<8, 8>>,

  // The 'nest' parameter, if any, is passed in X18.
  // Darwin uses X18 as the platform register and hence 'nest' isn't currently
  // supported there.
  CCIfNest<CCAssignToReg<[X18]>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // An i128 is split into two i64s and must start at an even-numbered
  // register, so its first half cannot land in X7.
  CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
                                                    [X0, X1, X3, X5]>>>,

  // An i128 is split into two i64s; when passed on the stack it requires
  // 16-byte alignment.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,
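
  // Worked example (for illustration; it follows from the split rules above):
  // in a call such as  void f(long a, __int128 b)  'a' is assigned X0, and the
  // split i128 'b' must start at an even-numbered register, so it occupies X2
  // and X3 while X1 is left unused. If no suitable register pair remains, both
  // halves go to a 16-byte-aligned stack slot instead.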

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more arguments remain than will fit in registers, pass them on the
  // stack instead.
  CCIfType<[i1, i8, i16, f16], CCAssignToStack<8, 8>>,
  CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;
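
// Worked example for CC_AArch64_AAPCS (illustration only): a call with nine
// i64 arguments assigns the first eight to X0..X7; the ninth falls through to
// the CCAssignToStack rules and is given an 8-byte, 8-byte-aligned stack slot.
// Note that under this convention even an i8 that overflows to the stack still
// occupies a full 8-byte slot, unlike the Darwin variant below.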

def RetCC_AArch64_AAPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
      CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                              [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
      CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;


// Darwin uses a calling convention which differs in only two ways
// from the standard one at this level:
//     + i128s (i.e. split i64s) don't need even registers.
//     + Stack slots are sized as needed rather than being at least 64-bit.
def CC_AArch64_DarwinPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. The minimum size and alignment
  // of a slot are 64 bits.
  CCIfByVal<CCPassByVal<8, 8>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // An i128 is split into two i64s; the first half cannot be assigned X7
  // because the second half would have no register left.
  CCIfType<[i64],
           CCIfSplit<CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6],
                                             [W0, W1, W2, W3, W4, W5, W6]>>>,
  // An i128 is split into two i64s; when passed on the stack it requires
  // 16-byte alignment.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,
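
  // Worked example (for illustration): unlike AAPCS above, Darwin does not
  // require an even register pair for a split i128, so in a call such as
  // void f(long a, __int128 b) the value 'a' goes in X0 and 'b' occupies X1
  // and X2 with no register skipped. The split cannot start in X7 because the
  // second half would have no register left.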

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more arguments remain than will fit in registers, pass them on the
  // stack instead.
  CCIf<"ValVT == MVT::i1 || ValVT == MVT::i8", CCAssignToStack<1, 1>>,
  CCIf<"ValVT == MVT::i16 || ValVT == MVT::f16", CCAssignToStack<2, 2>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;
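
// Worked example for CC_AArch64_DarwinPCS (illustration only): once the GPRs
// are exhausted, a further i8 argument receives a 1-byte stack slot and an i16
// a 2-byte slot, whereas CC_AArch64_AAPCS above would round each of them up to
// an 8-byte slot.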

def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Stack_Block">>,

  // Handle all scalar types as either i64 or f64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  CCIfType<[f16, f32],     CCPromoteToType<f64>>,

  // Everything is passed on the stack.
  // An i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;
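
// Sketch of intended use (assuming, as handled elsewhere in the backend, that
// the named arguments of a Darwin variadic call use CC_AArch64_DarwinPCS): the
// anonymous arguments handled here never use registers; for example, a
// variadic i32 is promoted to i64 and written to an 8-byte, 8-byte-aligned
// stack slot.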

// The WebKit_JS calling convention passes only the first argument (the callee)
// in a register and the remaining arguments on the stack. We allow 32-bit
// stack slots so that WebKit can write partial values to the stack and define
// the other 32-bit quantity as undef.
def CC_AArch64_WebKit_JS : CallingConv<[
  // Handle i1, i8, i16, i32, and i64 passing in register X0 (W0).
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0], [X0]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0], [W0]>>,

  // Pass the remaining arguments on the stack instead.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;
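
// Worked example for CC_AArch64_WebKit_JS (illustration only): for a call with
// operands (callee, i32 a, i32 b), the callee takes X0 (W0 for a 32-bit value)
// and 'a' and 'b' are placed in consecutive 4-byte stack slots, giving the
// 32-bit slot layout described above.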

def RetCC_AArch64_WebKit_JS : CallingConv<[
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;

//===----------------------------------------------------------------------===//
// ARM64 Calling Convention for GHC
//===----------------------------------------------------------------------===//

// This calling convention is specific to the Glasgow Haskell Compiler.
// The only documentation is the GHC source code, specifically the C header
// file:
//
//     https://github.com/ghc/ghc/blob/master/includes/stg/MachRegs.h
//
// which defines the registers for the Spineless Tagless G-Machine (STG) that
// GHC uses to implement lazy evaluation. The generic STG machine has a set of
// registers which are mapped to an appropriate set of architecture-specific
// registers for each CPU architecture.
//
// The STG Machine is documented here:
//
//    https://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/GeneratedCode
//
// The AArch64 register mapping is under the heading "The ARMv8/AArch64 ABI
// register mapping".

def CC_AArch64_GHC : CallingConv<[
  // Handle all vector types as either f64 or v2f64.
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, f128], CCBitConvertToType<v2f64>>,

  CCIfType<[v2f64], CCAssignToReg<[Q4, Q5]>>,
  CCIfType<[f32], CCAssignToReg<[S8, S9, S10, S11]>>,
  CCIfType<[f64], CCAssignToReg<[D12, D13, D14, D15]>>,

  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64], CCAssignToReg<[X19, X20, X21, X22, X23, X24, X25, X26, X27, X28]>>
]>;
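
// For reference, the order of the comment and register list above gives the
// mapping Base->X19, Sp->X20, Hp->X21, R1->X22, R2->X23, R3->X24, R4->X25,
// R5->X26, R6->X27, SpLim->X28 (see GHC's MachRegs.h for the authoritative
// assignment).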

// FIXME: LR is only callee-saved in the sense that *we* preserve it and are
// presumably a callee to someone. External functions may not do so, but this
// is currently safe since BL has LR as an implicit-def and what happens after a
// tail call doesn't matter.
//
// It would be better to model its preservation semantics properly (create a
// vreg on entry, use it in RET & tail call generation; make that vreg def if we
// end up saving LR as part of a call frame). Watch this space...
def CSR_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
                                           X23, X24, X25, X26, X27, X28,
                                           D8,  D9,  D10, D11,
                                           D12, D13, D14, D15)>;
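
// In practical terms (a note, not a behavioural change): a function using this
// convention that clobbers any register listed above, e.g. X19 or D8, must
// save and restore it in its prologue/epilogue, while registers outside the
// list are treated as caller-saved.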

// Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
// 'this' and the pointer return value are both passed in X0 in these cases,
// this can be partially modelled by treating X0 as a callee-saved register.
// Only the resulting RegMask is used; the SaveList is ignored.
//
// (For generic ARM 64-bit ABI code, clang will not generate constructors or
// destructors with 'this' returns, so this RegMask will not be used in that
// case.)
def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;

// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. X16 and X17 are used on the
// fast path for calculation, but all other registers except X0 (argument and
// return value) and LR (it is a call, after all) are preserved.
def CSR_AArch64_TLS_Darwin
    : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
                           FP,
                           (sequence "Q%u", 0, 31))>;
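
// Note for comparison: unlike CSR_AArch64_AAPCS, which preserves only D8-D15
// of the FP/SIMD register file, this set preserves the full 128-bit Q0-Q31
// registers.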

// We can only handle a register pair with adjacent registers; the registers in
// the pair should also belong to the same class. Since the access function on
// the fast path calls a function that follows CSR_AArch64_TLS_Darwin,
// CSR_AArch64_CXX_TLS_Darwin should be a subset of CSR_AArch64_TLS_Darwin.
def CSR_AArch64_CXX_TLS_Darwin
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS,
                           (sub (sequence "X%u", 1, 28), X15, X16, X17, X18),
                           (sequence "D%u", 0, 31))>;

// CSRs that are handled by the prologue and epilogue.
def CSR_AArch64_CXX_TLS_Darwin_PE
    : CalleeSavedRegs<(add LR, FP)>;

// CSRs that are handled explicitly via copies.
def CSR_AArch64_CXX_TLS_Darwin_ViaCopy
    : CalleeSavedRegs<(sub CSR_AArch64_CXX_TLS_Darwin, LR, FP)>;

// The ELF stub used for TLS-descriptor access saves every feasible
// register. Only X0 and LR are clobbered.
def CSR_AArch64_TLS_ELF
    : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_AllRegs
    : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
                           (sequence "X%u", 0, 28), FP, LR, SP,
                           (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
                           (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_NoRegs : CalleeSavedRegs<(add)>;