      1 //===-- X86CallingConv.td - Calling Conventions X86 32/64 --*- tablegen -*-===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This describes the calling conventions for the X86-32 and X86-64
     11 // architectures.
     12 //
     13 //===----------------------------------------------------------------------===//
     14 
     15 /// CCIfSubtarget - Match if the current subtarget has a feature F.
/// CCIfSubtarget - Match if the current subtarget has a feature F.
/// F must be the name of a boolean X86Subtarget accessor (e.g. "hasSSE2()");
/// it is string-concatenated into a C++ predicate evaluated on the CCState's
/// target subtarget when the generated calling-convention code runs.
class CCIfSubtarget<string F, CCAction A>
 : CCIf<!strconcat("State.getTarget().getSubtarget<X86Subtarget>().", F), A>;
     18 
     19 //===----------------------------------------------------------------------===//
     20 // Return Value Calling Conventions
     21 //===----------------------------------------------------------------------===//
     22 
     23 // Return-value conventions common to all X86 CC's.
// Return-value conventions common to all X86 CC's.
// NOTE: entries are tried in order; the first matching rule wins.
def RetCC_X86Common : CallingConv<[
  // Scalar values are returned in AX first, then DX.  For i8, the ABI
  // requires the values to be in AL and AH, however this code uses AL and DL
  // instead. This is because using AH for the second register conflicts with
  // the way LLVM does multiple return values -- a return of {i16,i8} would end
  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
  // for functions that return two i8 values are currently expected to pack the
  // values into an i16 (which uses AX, and thus AL:AH).
  CCIfType<[i8] , CCAssignToReg<[AL, DL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX, RDX]>>,

  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
  // can only be used by ABI non-compliant code. If the target doesn't have XMM
  // registers, it won't have vector types.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit vectors are returned in YMM0 and YMM1, when they fit. YMM2 and YMM3
  // can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX target feature.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // MMX vector types are always returned in MM0. If the target doesn't have
  // MM0, it doesn't support these vector types.
  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,

  // Long double types are always returned on the x87 stack: the first value
  // in ST0 and a second one in ST1 (even with SSE).
  CCIfType<[f80], CCAssignToReg<[ST0, ST1]>>
]>;
     56 
     57 // X86-32 C return-value convention.
// X86-32 C return-value convention.
def RetCC_X86_32_C : CallingConv<[
  // The X86-32 calling convention returns FP values in ST0, unless marked
  // with "inreg" (used here to distinguish one kind of reg from another,
  // weirdly; this is really the sse-regparm calling convention) in which
  // case they use XMM0 (up to XMM2 for multiple values), otherwise it is the
  // same as the common X86 calling conv.  The inreg rule also requires SSE2.
  CCIfInReg<CCIfSubtarget<"hasSSE2()",
    CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
  CCIfType<[f32,f64], CCAssignToReg<[ST0, ST1]>>,
  // Everything else (integers, vectors, f80) follows the common rules.
  CCDelegateTo<RetCC_X86Common>
]>;
     69 
     70 // X86-32 FastCC return-value convention.
// X86-32 FastCC return-value convention.
def RetCC_X86_32_Fast : CallingConv<[
  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
  // SSE2.
  // This can happen when a float, 2 x float, or 3 x float vector is split by
  // target lowering, and is returned in 1-3 sse regs.
  // Without SSE2 these fall through to the common convention (ST0/ST1).
  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,

  // For integers, ECX can be used as an extra return register
  CCIfType<[i8],  CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,

  // Otherwise, it is the same as the common X86 calling convention.
  CCDelegateTo<RetCC_X86Common>
]>;
     87 
     88 // X86-64 C return-value convention.
// X86-64 C return-value convention.
def RetCC_X86_64_C : CallingConv<[
  // The X86-64 calling convention returns FP values in XMM0 (XMM1 for a
  // second value).
  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,

  // MMX vector types are returned in XMM0 (XMM1 for a second value).
  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
  // Integers, SSE/AVX vectors and f80 follow the common rules.
  CCDelegateTo<RetCC_X86Common>
]>;
     98 
     99 // X86-Win64 C return-value convention.
// X86-Win64 C return-value convention.
def RetCC_X86_Win64_C : CallingConv<[
  // The X86-Win64 calling convention always returns __m64 values in RAX.
  // The bitconvert to i64 makes the i64 rule in RetCC_X86_64_C pick RAX.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // Otherwise, everything is the same as 'normal' X86-64 C CC.
  CCDelegateTo<RetCC_X86_64_C>
]>;
    107 
    108 
    109 // This is the root return-value convention for the X86-32 backend.
// This is the root return-value convention for the X86-32 backend.
// Dispatches on the IR-level calling convention of the callee.
def RetCC_X86_32 : CallingConv<[
  // If FastCC, use RetCC_X86_32_Fast.
  CCIfCC<"CallingConv::Fast", CCDelegateTo<RetCC_X86_32_Fast>>,
  // Otherwise, use RetCC_X86_32_C.
  CCDelegateTo<RetCC_X86_32_C>
]>;
    116 
    117 // This is the root return-value convention for the X86-64 backend.
// This is the root return-value convention for the X86-64 backend.
// Dispatches on the target OS (Win64 vs. everything else).
def RetCC_X86_64 : CallingConv<[
  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<RetCC_X86_64_C>
]>;
    125 
    126 // This is the return-value convention used for the entire X86 backend.
// This is the return-value convention used for the entire X86 backend.
// Splits on 32- vs. 64-bit mode first, then each side refines further.
def RetCC_X86 : CallingConv<[
  CCIfSubtarget<"is64Bit()", CCDelegateTo<RetCC_X86_64>>,
  CCDelegateTo<RetCC_X86_32>
]>;
    131 
    132 //===----------------------------------------------------------------------===//
    133 // X86-64 Argument Calling Conventions
    134 //===----------------------------------------------------------------------===//
    135 
// X86-64 System V argument convention (the "normal" x86-64 C convention).
// NOTE: entries are tried in order; the first matching rule wins, so the
// register rules must precede the stack-slot fallbacks below.
def CC_X86_64_C : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // The first 6 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
  CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,

  // On Darwin with SSE2, MMX vector arguments are widened to v2i64 so they
  // land in XMM registers via the FP/vector rule below.
  CCIfType<[x86mmx],
            CCIfSubtarget<"isTargetDarwin()",
            CCIfSubtarget<"hasSSE2()",
            CCPromoteToType<v2i64>>>>,

  // The first 8 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,

  // The first 8 256-bit vector arguments are passed in YMM registers, unless
  // this is a vararg function.
  // FIXME: This isn't precisely correct; the x86-64 ABI document says that
  // fixed arguments to vararg functions are supposed to be passed in
  // registers.  Actually modeling that would be a lot of work, though.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                          CCIfSubtarget<"hasAVX()",
                          CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
                                         YMM4, YMM5, YMM6, YMM7]>>>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depends on the
  // subtarget (size/alignment of 0 means "query the target").
  CCIfType<[f80], CCAssignToStack<0, 0>>,

  // Vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>
]>;
    186 
    187 // Calling convention used on Win64
// Calling convention used on Win64 (Microsoft x64 ABI).
// NOTE: entries are tried in order; the first matching rule wins.
def CC_X86_Win64_C : CallingConv<[
  // FIXME: Handle byval stuff.
  // FIXME: Handle varargs.

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // 128 bit vectors are passed by pointer
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,


  // 256 bit vectors are passed by pointer
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>,

  // __m64 vectors are bitconverted to i64 so they are handled by the GPR
  // rules below (first 4 in registers, rest on the stack).
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // The first 4 integer arguments are passed in integer registers; each GPR
  // shadows (consumes) the XMM register in the same argument slot.
  CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // Do not pass the sret argument in RCX, the Win64 thiscall calling
  // convention requires "this" to be passed in RCX.
  // This rule must precede the general i64 rule below to take effect.
  CCIfCC<"CallingConv::X86_ThisCall",
    CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[RDX , R8  , R9  ],
                                                     [XMM1, XMM2, XMM3]>>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[RCX , RDX , R8  , R9  ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // The first 4 FP/Vector arguments are passed in XMM registers; each XMM
  // shadows the GPR in the same argument slot.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToRegWithShadow<[XMM0, XMM1, XMM2, XMM3],
                                   [RCX , RDX , R8  , R9  ]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depends on the
  // subtarget (size/alignment of 0 means "query the target").
  CCIfType<[f80], CCAssignToStack<0, 0>>
]>;
    234 
// X86-64 argument convention for the Glasgow Haskell Compiler (GHC).
// Maps GHC's virtual STG machine registers onto fixed hardware registers.
def CC_X86_64_GHC : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64],
            CCAssignToReg<[R13, RBP, R12, RBX, R14, RSI, RDI, R8, R9, R15]>>,

  // Pass in STG registers: F1, F2, F3, F4, D1, D2
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>
]>;
    248 
    249 //===----------------------------------------------------------------------===//
    250 // X86 C Calling Convention
    251 //===----------------------------------------------------------------------===//
    252 
    253 /// CC_X86_32_Common - In all X86-32 calling conventions, extra integers and FP
    254 /// values are spilled on the stack, and the first 4 vector values go in XMM
    255 /// regs.
/// CC_X86_32_Common - In all X86-32 calling conventions, extra integers and FP
/// values are spilled on the stack, and the first 4 vector values go in XMM
/// regs.  The specific X86-32 conventions below delegate here after handling
/// their register-passed arguments.
def CC_X86_32_Common : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<4, 4>>,

  // The first 3 float or double arguments, if marked 'inreg' and if the call
  // is not a vararg call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,

  // The first 3 __m64 vector arguments are passed in mmx registers if the
  // call is not a vararg call.
  CCIfNotVarArg<CCIfType<[x86mmx],
                CCAssignToReg<[MM0, MM1, MM2]>>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Doubles get 8-byte slots that are 4-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 4>>,

  // Long doubles get slots whose size depends on the subtarget
  // (size of 0 means "query the target"), 4-byte aligned.
  CCIfType<[f80], CCAssignToStack<0, 4>>,

  // The first 4 SSE vector arguments are passed in XMM registers.
  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,

  // The first 4 AVX 256-bit vector arguments are passed in YMM registers.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                CCIfSubtarget<"hasAVX()",
                CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,

  // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
  // passed in the parameter area.
  CCIfType<[x86mmx], CCAssignToStack<8, 4>>]>;
    300 
// X86-32 C argument convention (default cdecl-style convention).
def CC_X86_32_C : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in ECX.
  CCIfNest<CCAssignToReg<[ECX]>>,

  // The first 3 integer arguments, if marked 'inreg' and if the call is not
  // a vararg call, are passed in integer registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
    315 
// X86-32 fastcall argument convention (Microsoft __fastcall).
def CC_X86_32_FastCall : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
    329 
// X86-32 thiscall argument convention ("this" pointer in ECX).
def CC_X86_32_ThisCall : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass sret arguments indirectly through EAX
  CCIfSRet<CCAssignToReg<[EAX]>>,

  // The first integer argument is passed in ECX.  Because the "this" pointer
  // is the first argument, it ends up in ECX.
  CCIfType<[i32], CCAssignToReg<[ECX]>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
    343 
// X86-32 argument convention for LLVM's fastcc.
def CC_X86_32_FastCC : CallingConv<[
  // Handles byval parameters.  Note that we can't rely on the delegation
  // to CC_X86_32_Common for this because that happens after code that
  // puts arguments in registers.
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,

  // The first 3 float or double arguments, if the call is not a vararg
  // call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,

  // Doubles get 8-byte slots that are 8-byte aligned (stricter alignment
  // than the 4-byte-aligned slots CC_X86_32_Common would give them).
  CCIfType<[f64], CCAssignToStack<8, 8>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
    371 
// X86-32 argument convention for the Glasgow Haskell Compiler (GHC).
// Maps GHC's virtual STG machine registers onto fixed hardware registers.
def CC_X86_32_GHC : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in STG registers: Base, Sp, Hp, R1
  CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
]>;
    379 
    380 //===----------------------------------------------------------------------===//
    381 // X86 Root Argument Calling Conventions
    382 //===----------------------------------------------------------------------===//
    383 
    384 // This is the root argument convention for the X86-32 backend.
// This is the root argument convention for the X86-32 backend.
// Dispatches on the IR-level calling convention of the callee.
def CC_X86_32 : CallingConv<[
  CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
  CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
  CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,

  // Otherwise, drop to normal X86-32 CC
  CCDelegateTo<CC_X86_32_C>
]>;
    394 
    395 // This is the root argument convention for the X86-64 backend.
// This is the root argument convention for the X86-64 backend.
// Dispatches on the IR-level calling convention, then on the target OS.
def CC_X86_64 : CallingConv<[
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<CC_X86_64_C>
]>;
    405 
    406 // This is the argument convention used for the entire X86 backend.
// This is the argument convention used for the entire X86 backend.
// Splits on 32- vs. 64-bit mode first, then each side refines further.
def CC_X86 : CallingConv<[
  CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
  CCDelegateTo<CC_X86_32>
]>;
    411 
    412 //===----------------------------------------------------------------------===//
    413 // Callee-saved Registers.
    414 //===----------------------------------------------------------------------===//
    415 
// GHC calls preserve no registers at all.
def CSR_Ghc : CalleeSavedRegs<(add)>;

// Standard callee-saved register sets for 32- and 64-bit C conventions.
def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;

// Exception-handler return paths additionally preserve AX/DX.
// NOTE(review): presumably because EAX/EDX (RAX/RDX) carry live values on
// the EH return path -- confirm against the EH lowering code.
def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;

// Win64 additionally preserves RDI/RSI and XMM6-XMM15.
def CSR_Win64 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15,
                                     (sequence "XMM%u", 6, 15))>;
    426