//===-- X86CallingConv.td - Calling Conventions X86 32/64 --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the X86-32 and X86-64
// architectures.
//
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
    : CCIf<!strconcat("static_cast<const X86Subtarget&>"
                       "(State.getMachineFunction().getSubtarget()).", F),
           A>;
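
// For illustration only (a sketch, not used by the backend): CCIfSubtarget
// wraps an action so that it only applies when the named X86Subtarget
// predicate returns true, e.g. guarding an XMM register assignment on SSE2:
//
//   def CC_Sketch_SSE2Only : CallingConv<[
//     // Hand out XMM0/XMM1 only when the subtarget reports hasSSE2().
//     CCIfType<[f32, f64],
//              CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0, XMM1]>>>,
//     // Otherwise fall through to 4-byte aligned stack slots.
//     CCIfType<[f32], CCAssignToStack<4, 4>>,
//     CCIfType<[f64], CCAssignToStack<8, 4>>
//   ]>;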

//===----------------------------------------------------------------------===//
// Return Value Calling Conventions
//===----------------------------------------------------------------------===//

// Return-value conventions common to all X86 CC's.
def RetCC_X86Common : CallingConv<[
  // Scalar values are returned in AX first, then DX.  For i8, the ABI
  // requires the values to be in AL and AH; however, this code uses AL and DL
  // instead. This is because using AH for the second register conflicts with
  // the way LLVM does multiple return values -- a return of {i16,i8} would end
  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
  // for functions that return two i8 values are currently expected to pack the
  // values into an i16 (which uses AX, and thus AL:AH).
  //
  // For code that doesn't care about the ABI, we allow returning more than two
  // integer values in registers.
  CCIfType<[i1],  CCPromoteToType<i8>>,
  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,

  // Boolean vectors of AVX-512 are returned in SIMD registers.
  // A call from an AVX function to an AVX-512 function should work,
  // since the boolean types in AVX/AVX2 are promoted by default.
  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
  CCIfType<[v64i1], CCPromoteToType<v64i8>>,

  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
  // can only be used by ABI non-compliant code. If the target doesn't have XMM
  // registers, it won't have vector types.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit vectors are returned in YMM0 and YMM1, when they fit. YMM2 and YMM3
  // can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX target feature.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
  // can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX-512 target feature.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // MMX vector types are always returned in MM0. If the target doesn't have
  // MM0, it doesn't support these vector types.
  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,

  // Long double types are always returned in FP0 (even with SSE).
  CCIfType<[f80], CCAssignToReg<[FP0, FP1]>>
]>;
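
// A short worked note (exposition only): entries are matched top to bottom,
// and CCAssignToReg hands each value the first register in its list that has
// not already been allocated, so ABI-indifferent code returning three i32
// values gets them in EAX, EDX and ECX, in that order.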

// X86-32 C return-value convention.
def RetCC_X86_32_C : CallingConv<[
  // The X86-32 calling convention returns FP values in FP0, unless marked
  // with "inreg" (used here to distinguish one kind of reg from another,
  // weirdly; this is really the sse-regparm calling convention), in which
  // case they use XMM0; otherwise it is the same as the common X86 calling
  // convention.
  CCIfInReg<CCIfSubtarget<"hasSSE2()",
    CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
  CCIfType<[f32,f64], CCAssignToReg<[FP0, FP1]>>,
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-32 FastCC return-value convention.
def RetCC_X86_32_Fast : CallingConv<[
  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
  // SSE2.
  // This can happen when a float, 2 x float, or 3 x float vector is split by
  // target lowering, and is returned in 1-3 SSE regs.
  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,

  // For integers, ECX can be used as an extra return register
  CCIfType<[i8],  CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,

  // Otherwise, it is the same as the common X86 calling convention.
  CCDelegateTo<RetCC_X86Common>
]>;

// Intel_OCL_BI return-value convention.
def RetCC_Intel_OCL_BI : CallingConv<[
  // Vector types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit FP vectors
  // No more than 4 registers
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit FP vectors
  CCIfType<[v16f32, v8f64, v16i32, v8i64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // i32, i64 in the standard way
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-32 HiPE return-value convention.
def RetCC_X86_32_HiPE : CallingConv<[
  // Promote all types to i32
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Return: HP, P, VAL1, VAL2
  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX]>>
]>;

// X86-32 Vectorcall return-value convention.
def RetCC_X86_32_VectorCall : CallingConv<[
  // Vector types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit FP vectors
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit FP vectors
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // Return integers in the standard way.
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-64 C return-value convention.
def RetCC_X86_64_C : CallingConv<[
  // The X86-64 calling convention returns FP values in XMM0, with XMM1 used
  // for a second value.
  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f128], CCAssignToReg<[XMM0, XMM1]>>,

  // MMX vector types are always returned in XMM0.
  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,

  // A SwiftError is returned in R12.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

  CCDelegateTo<RetCC_X86Common>
]>;

// X86-Win64 C return-value convention.
def RetCC_X86_Win64_C : CallingConv<[
  // The X86-Win64 calling convention always returns __m64 values in RAX.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // Otherwise, everything is the same as 'normal' X86-64 C CC.
  CCDelegateTo<RetCC_X86_64_C>
]>;

// X86-64 HiPE return-value convention.
def RetCC_X86_64_HiPE : CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: HP, P, VAL1, VAL2
  CCIfType<[i64], CCAssignToReg<[R15, RBP, RAX, RDX]>>
]>;

// X86-64 WebKit_JS return-value convention.
def RetCC_X86_64_WebKit_JS : CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: RAX
  CCIfType<[i64], CCAssignToReg<[RAX]>>
]>;

// X86-64 Swift return-value convention.
def RetCC_X86_64_Swift : CallingConv<[
  // For integers, ECX and R8D can be used as extra return registers.
  CCIfType<[i1],  CCPromoteToType<i8>>,
  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL, R8B]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX, R8W]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX, R8D]>>,
  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX, R8]>>,

  // XMM0, XMM1, XMM2 and XMM3 can be used to return FP values.
  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
  CCIfType<[f128], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,

  // MMX vector types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-64 AnyReg return-value convention. No explicit register is specified for
// the return value. The register allocator is allowed and expected to choose
// any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert in Debug
// builds. In Release builds we fall back to the X86 C calling convention.
def RetCC_X86_64_AnyReg : CallingConv<[
  CCCustom<"CC_X86_AnyReg_Error">
]>;
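
// For exposition only: CCCustom defers the assignment decision to a C++
// handler compiled into the backend; the string names the handler, which
// reports whether it placed the value.  A hypothetical use (the handler name
// below is made up and does not exist in the backend):
//
//   def CC_Sketch_Custom : CallingConv<[
//     // Let a custom routine try to place i64 values, then fall back to the
//     // stack if it declines.
//     CCIfType<[i64], CCCustom<"CC_Sketch_PlaceI64">>,
//     CCIfType<[i64], CCAssignToStack<8, 8>>
//   ]>;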

// X86-64 HHVM return-value convention.
def RetCC_X86_64_HHVM: CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: may use any GP register except RSP and R12.
  CCIfType<[i64], CCAssignToReg<[RBX, RBP, RDI, RSI, RDX, RCX, R8, R9,
                                 RAX, R10, R11, R13, R14, R15]>>
]>;

// This is the root return-value convention for the X86-32 backend.
def RetCC_X86_32 : CallingConv<[
  // If FastCC, use RetCC_X86_32_Fast.
  CCIfCC<"CallingConv::Fast", CCDelegateTo<RetCC_X86_32_Fast>>,
  // If HiPE, use RetCC_X86_32_HiPE.
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_32_HiPE>>,
  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_32_VectorCall>>,

  // Otherwise, use RetCC_X86_32_C.
  CCDelegateTo<RetCC_X86_32_C>
]>;

// This is the root return-value convention for the X86-64 backend.
def RetCC_X86_64 : CallingConv<[
  // HiPE uses RetCC_X86_64_HiPE
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,

  // Handle JavaScript calls.
  CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<RetCC_X86_64_WebKit_JS>>,
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,

  // Handle Swift calls.
  CCIfCC<"CallingConv::Swift", CCDelegateTo<RetCC_X86_64_Swift>>,

  // Handle explicit CC selection
  CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,

  // Handle HHVM calls.
  CCIfCC<"CallingConv::HHVM", CCDelegateTo<RetCC_X86_64_HHVM>>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<RetCC_X86_64_C>
]>;

// This is the return-value convention used for the entire X86 backend.
def RetCC_X86 : CallingConv<[

  // Check if this is the Intel OpenCL built-ins calling convention
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<RetCC_Intel_OCL_BI>>,

  CCIfSubtarget<"is64Bit()", CCDelegateTo<RetCC_X86_64>>,
  CCDelegateTo<RetCC_X86_32>
]>;
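
// Note on dispatch order (exposition only): entries in a CallingConv list are
// tried top to bottom and the first one that applies wins, so the explicit
// CCIfCC checks above take priority over the subtarget-based fallbacks, and
// RetCC_X86_32 is only reached on 32-bit subtargets.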

//===----------------------------------------------------------------------===//
// X86-64 Argument Calling Conventions
//===----------------------------------------------------------------------===//

def CC_X86_64_C : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote i1/i8/i16 arguments to i32.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCIfSubtarget<"isTarget64BitILP32()", CCAssignToReg<[R10D]>>>,
  CCIfNest<CCAssignToReg<[R10]>>,

  // Pass SwiftSelf in a callee-saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[R13]>>>,

  // A SwiftError is passed in R12.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

  // For the Swift calling convention, pass sret in %RAX.
  CCIfCC<"CallingConv::Swift",
    CCIfSRet<CCIfType<[i64], CCAssignToReg<[RAX]>>>>,

  // The first 6 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
  CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,

  // The first 8 MMX vector arguments are passed in XMM registers on Darwin.
  CCIfType<[x86mmx],
            CCIfSubtarget<"isTargetDarwin()",
            CCIfSubtarget<"hasSSE2()",
            CCPromoteToType<v2i64>>>>,

  // Boolean vectors of AVX-512 are passed in SIMD registers.
  // A call from an AVX function to an AVX-512 function should work,
  // since the boolean types in AVX/AVX2 are promoted by default.
  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
  CCIfType<[v64i1], CCPromoteToType<v64i8>>,

  // The first 8 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, f128, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,

  // The first 8 256-bit vector arguments are passed in YMM registers, unless
  // this is a vararg function.
  // FIXME: This isn't precisely correct; the x86-64 ABI document says that
  // fixed arguments to vararg functions are supposed to be passed in
  // registers.  Actually modeling that would be a lot of work, though.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                          CCIfSubtarget<"hasFp256()",
                          CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
                                         YMM4, YMM5, YMM6, YMM7]>>>>,

  // The first 8 512-bit vector arguments are passed in ZMM registers.
  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
            CCIfSubtarget<"hasAVX512()",
            CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depend on the
  // subtarget.
  CCIfType<[f80, f128], CCAssignToStack<0, 0>>,

  // Vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
  CCIfType<[v16i32, v8i64, v16f32, v8f64],
           CCAssignToStack<64, 64>>
]>;
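
// Worked sketch of the rules above (exposition only): for an argument list of
// (i64, i64, double, i64, <4 x float>), the i64 arguments take RDI, RSI and
// RDX in order, the double takes XMM0, and the <4 x float> takes XMM1; once
// RDI..R9 or XMM0..XMM7 are exhausted, further arguments of that class fall
// through to the 8-byte aligned stack slots below.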

// Calling convention for X86-64 HHVM.
def CC_X86_64_HHVM : CallingConv<[
  // Use all GP registers for arguments, except RSP.
  CCIfType<[i64], CCAssignToReg<[RBX, R12, RBP, R15,
                                 RDI, RSI, RDX, RCX, R8, R9,
                                 RAX, R10, R11, R13, R14]>>
]>;

// Calling convention for helper functions in HHVM.
def CC_X86_64_HHVM_C : CallingConv<[
  // Pass the first argument in RBP.
  CCIfType<[i64], CCAssignToReg<[RBP]>>,

  // Otherwise, it's the same as the regular C calling convention.
  CCDelegateTo<CC_X86_64_C>
]>;

// Calling convention used on Win64
def CC_X86_Win64_C : CallingConv<[
  // FIXME: Handle byval stuff.
  // FIXME: Handle varargs.

  // Promote i1/i8/i16 arguments to i32.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // 128-bit vectors are passed by pointer.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,

  // 256-bit vectors are passed by pointer.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>,

  // 512-bit vectors are passed by pointer.
  CCIfType<[v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,

  // The first 4 MMX vector arguments are passed in GPRs.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // The first 4 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // Do not pass the sret argument in RCX; the Win64 thiscall calling
  // convention requires "this" to be passed in RCX.
  CCIfCC<"CallingConv::X86_ThisCall",
    CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[RDX , R8  , R9  ],
                                                     [XMM1, XMM2, XMM3]>>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[RCX , RDX , R8  , R9  ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // The first 4 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToRegWithShadow<[XMM0, XMM1, XMM2, XMM3],
                                   [RCX , RDX , R8  , R9  ]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depend on the
  // subtarget.
  CCIfType<[f80], CCAssignToStack<0, 0>>
]>;
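
// Exposition only: CCAssignToRegWithShadow implements the Win64 rule that
// each argument consumes one slot from both register sequences, so the GPR
// and XMM registers stay "in step" by argument position.  For example, with
// the argument list (i64, double, i64), the first i64 takes RCX and shadows
// XMM0, the double takes XMM1 (XMM0 is already shadowed) and shadows RDX, and
// the second i64 takes R8.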

def CC_X86_Win64_VectorCall : CallingConv<[
  // The first 6 floating-point and vector types of 128 bits or less use
  // XMM0-XMM5.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5]>>,

  // 256-bit vectors use YMM registers.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3, YMM4, YMM5]>>,

  // 512-bit vectors use ZMM registers.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
           CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5]>>,

  // Delegate to fastcall to handle integer types.
  CCDelegateTo<CC_X86_Win64_C>
]>;

def CC_X86_64_GHC : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64],
            CCAssignToReg<[R13, RBP, R12, RBX, R14, RSI, RDI, R8, R9, R15]>>,

  // Pass in STG registers: F1, F2, F3, F4, D1, D2
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>
]>;

def CC_X86_64_HiPE : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2, ARG3
  CCIfType<[i64], CCAssignToReg<[R15, RBP, RSI, RDX, RCX, R8]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
]>;

def CC_X86_64_WebKit_JS : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Only the first integer argument is passed in a register.
  CCIfType<[i32], CCAssignToReg<[EAX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX]>>,

  // The remaining integer arguments are passed on the stack. 32-bit integer
  // and floating-point arguments are aligned to 4 bytes and stored in 4-byte
  // slots. 64-bit integer and floating-point arguments are aligned to 8 bytes
  // and stored in 8-byte stack slots.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;

// No explicit register is specified for the AnyReg calling convention. The
// register allocator may assign the arguments to any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert in Debug
// builds. In Release builds we fall back to the X86 C calling convention.
def CC_X86_64_AnyReg : CallingConv<[
  CCCustom<"CC_X86_AnyReg_Error">
]>;

//===----------------------------------------------------------------------===//
// X86 C Calling Convention
//===----------------------------------------------------------------------===//

/// CC_X86_32_Vector_Common - In all X86-32 calling conventions, extra vector
/// values are spilled on the stack.
def CC_X86_32_Vector_Common : CallingConv<[
  // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // 512-bit AVX-512 vectors get 64-byte stack slots that are 64-byte aligned.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
           CCAssignToStack<64, 64>>
]>;

// CC_X86_32_Vector_Standard - The first 3 vector arguments are passed in
// vector registers.
def CC_X86_32_Vector_Standard : CallingConv<[
  // SSE vector arguments are passed in XMM registers.
  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                CCAssignToReg<[XMM0, XMM1, XMM2]>>>,

  // AVX 256-bit vector arguments are passed in YMM registers.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                CCIfSubtarget<"hasFp256()",
                CCAssignToReg<[YMM0, YMM1, YMM2]>>>>,

  // AVX 512-bit vector arguments are passed in ZMM registers.
  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
                CCAssignToReg<[ZMM0, ZMM1, ZMM2]>>>,

  CCDelegateTo<CC_X86_32_Vector_Common>
]>;

// CC_X86_32_Vector_Darwin - The first 4 vector arguments are passed in
// vector registers.
def CC_X86_32_Vector_Darwin : CallingConv<[
  // SSE vector arguments are passed in XMM registers.
  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,

  // AVX 256-bit vector arguments are passed in YMM registers.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                CCIfSubtarget<"hasFp256()",
                CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,

  // AVX 512-bit vector arguments are passed in ZMM registers.
  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
                CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>>,

  CCDelegateTo<CC_X86_32_Vector_Common>
]>;
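
// Exposition only: both vector conventions above hand out registers first and
// then delegate to CC_X86_32_Vector_Common, so any vector argument that does
// not get a register (or that is passed to a vararg call) ends up in a stack
// slot of its own size and alignment.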

/// CC_X86_32_Common - In all X86-32 calling conventions, extra integers and FP
/// values are spilled on the stack.
def CC_X86_32_Common : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<4, 4>>,

  // The first 3 float or double arguments, if marked 'inreg' and if the call
  // is not a vararg call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,

  // The first 3 __m64 vector arguments are passed in MMX registers if the
  // call is not a vararg call.
  CCIfNotVarArg<CCIfType<[x86mmx],
                CCAssignToReg<[MM0, MM1, MM2]>>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Doubles get 8-byte slots that are 4-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 4>>,

  // Long doubles get slots whose size depends on the subtarget.
  CCIfType<[f80], CCAssignToStack<0, 4>>,

  // Boolean vectors of AVX-512 are passed in SIMD registers.
  // A call from an AVX function to an AVX-512 function should work,
  // since the boolean types in AVX/AVX2 are promoted by default.
  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
  CCIfType<[v64i1], CCPromoteToType<v64i8>>,

  // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
  // passed in the parameter area.
  CCIfType<[x86mmx], CCAssignToStack<8, 4>>,

  // Darwin passes vectors in a form that differs from the i386 psABI.
  CCIfSubtarget<"isTargetDarwin()", CCDelegateTo<CC_X86_32_Vector_Darwin>>,

  // Otherwise, drop to 'normal' X86-32 CC
  CCDelegateTo<CC_X86_32_Vector_Standard>
]>;
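
// Worked sketch (exposition only): for a plain cdecl call with the argument
// list (i32, double, float), nothing is marked 'inreg', so the rules above
// place the i32 in a 4-byte slot at offset 0, the double in an 8-byte slot at
// offset 4 (only 4-byte aligned), and the float in a 4-byte slot at offset 12.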

def CC_X86_32_C : CallingConv<[
  // Promote i1/i8/i16 arguments to i32.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in ECX.
  CCIfNest<CCAssignToReg<[ECX]>>,

  // The first 3 integer arguments, if marked 'inreg' and if the call is not
  // a vararg call, are passed in integer registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_MCU : CallingConv<[
  // Handles byval parameters.  Note that, like FastCC, we can't rely on
  // the delegation to CC_X86_32_Common because that happens after code that
  // puts arguments in registers.
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i1/i8/i16 arguments to i32.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  // If the call is not a vararg call, some arguments may be passed
  // in integer registers.
  CCIfNotVarArg<CCIfType<[i32], CCCustom<"CC_X86_32_MCUInReg">>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_FastCall : CallingConv<[
  // Promote i1/i8/i16 arguments to i32.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfInReg<CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_VectorCall : CallingConv<[
  // The first 6 floating-point and vector types of 128 bits or less use
  // XMM0-XMM5.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5]>>,

  // 256-bit vectors use YMM registers.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3, YMM4, YMM5]>>,

  // 512-bit vectors use ZMM registers.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
           CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5]>>,

  // Otherwise, pass it indirectly.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64,
            v32i8, v16i16, v8i32, v4i64, v8f32, v4f64,
            v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
           CCCustom<"CC_X86_32_VectorCallIndirect">>,

  // Delegate to fastcall to handle integer types.
  CCDelegateTo<CC_X86_32_FastCall>
]>;

def CC_X86_32_ThisCall_Common : CallingConv<[
  // The first integer argument is passed in ECX
  CCIfType<[i32], CCAssignToReg<[ECX]>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_ThisCall_Mingw : CallingConv<[
  // Promote i1/i8/i16 arguments to i32.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  CCDelegateTo<CC_X86_32_ThisCall_Common>
]>;

def CC_X86_32_ThisCall_Win : CallingConv<[
  // Promote i1/i8/i16 arguments to i32.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  // Pass sret arguments indirectly through the stack.
  CCIfSRet<CCAssignToStack<4, 4>>,

  CCDelegateTo<CC_X86_32_ThisCall_Common>
]>;

def CC_X86_32_ThisCall : CallingConv<[
  CCIfSubtarget<"isTargetCygMing()", CCDelegateTo<CC_X86_32_ThisCall_Mingw>>,
  CCDelegateTo<CC_X86_32_ThisCall_Win>
]>;

def CC_X86_32_FastCC : CallingConv<[
  // Handles byval parameters.  Note that we can't rely on the delegation
  // to CC_X86_32_Common for this because that happens after code that
  // puts arguments in registers.
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i1/i8/i16 arguments to i32.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,

  // The first 3 float or double arguments, if the call is not a vararg
  // call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,

  // Doubles get 8-byte slots that are 8-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 8>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_GHC : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in STG registers: Base, Sp, Hp, R1
  CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
]>;

def CC_X86_32_HiPE : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2
  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX, ECX]>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>
]>;

// Intel OpenCL built-ins calling convention.
def CC_Intel_OCL_BI : CallingConv<[

  CCIfType<[i32], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[ECX, EDX, R8D, R9D]>>>,
  CCIfType<[i64], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[RCX, RDX, R8,  R9 ]>>>,

  CCIfType<[i32], CCIfSubtarget<"is64Bit()", CCAssignToReg<[EDI, ESI, EDX, ECX]>>>,
  CCIfType<[i64], CCIfSubtarget<"is64Bit()", CCAssignToReg<[RDI, RSI, RDX, RCX]>>>,

  CCIfType<[i32], CCAssignToStack<4, 4>>,

  // The SSE vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,

  // The 256-bit vector arguments are passed in YMM registers.
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,

  // The 512-bit vector arguments are passed in ZMM registers.
  CCIfType<[v16f32, v8f64, v16i32, v8i64],
           CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,

  // Pass masks in mask registers
  CCIfType<[v16i1, v8i1], CCAssignToReg<[K1]>>,

  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
  CCIfSubtarget<"is64Bit()",       CCDelegateTo<CC_X86_64_C>>,
  CCDelegateTo<CC_X86_32_C>
]>;

// X86 interrupt handlers take all of their arguments on the stack.
def CC_X86_32_Intr : CallingConv<[
  CCAssignToStack<4, 4>
]>;

def CC_X86_64_Intr : CallingConv<[
  CCAssignToStack<8, 8>
]>;

//===----------------------------------------------------------------------===//
// X86 Root Argument Calling Conventions
//===----------------------------------------------------------------------===//

// This is the root argument convention for the X86-32 backend.
def CC_X86_32 : CallingConv<[
  // The X86_INTR calling convention is valid in the MCU target and should
  // override the MCU calling convention. Thus, it must be checked before
  // isTargetMCU().
  CCIfCC<"CallingConv::X86_INTR", CCDelegateTo<CC_X86_32_Intr>>,
  CCIfSubtarget<"isTargetMCU()", CCDelegateTo<CC_X86_32_MCU>>,
  CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_32_VectorCall>>,
  CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
  CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_32_HiPE>>,

  // Otherwise, drop to normal X86-32 CC
  CCDelegateTo<CC_X86_32_C>
]>;

// This is the root argument convention for the X86-64 backend.
def CC_X86_64 : CallingConv<[
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
  CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<CC_X86_64_WebKit_JS>>,
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
  CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<CC_X86_Win64_C>>,
  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,
  CCIfCC<"CallingConv::HHVM", CCDelegateTo<CC_X86_64_HHVM>>,
  CCIfCC<"CallingConv::HHVM_C", CCDelegateTo<CC_X86_64_HHVM_C>>,
  CCIfCC<"CallingConv::X86_INTR", CCDelegateTo<CC_X86_64_Intr>>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<CC_X86_64_C>
]>;

// This is the argument convention used for the entire X86 backend.
def CC_X86 : CallingConv<[
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<CC_Intel_OCL_BI>>,
  CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
  CCDelegateTo<CC_X86_32>
]>;

//===----------------------------------------------------------------------===//
// Callee-saved Registers.
//===----------------------------------------------------------------------===//

def CSR_NoRegs : CalleeSavedRegs<(add)>;

def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;

def CSR_64_SwiftError : CalleeSavedRegs<(sub CSR_64, R12)>;

def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;

def CSR_Win64 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15,
                                     (sequence "XMM%u", 6, 15))>;
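
// Exposition only: callee-saved sets are built with simple set algebra over
// register lists.  The dag operators compose as in this hypothetical example
// (not used anywhere in the backend):
//
//   // CSR_64 plus XMM8-XMM15, minus RBP.
//   def CSR_Sketch : CalleeSavedRegs<(sub (add CSR_64,
//                                              (sequence "XMM%u", 8, 15)),
//                                         RBP)>;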

// The function used by Darwin to obtain the address of a thread-local variable
// uses rdi to pass a single parameter and rax for the return value. All other
// GPRs are preserved.
def CSR_64_TLS_Darwin : CalleeSavedRegs<(add CSR_64, RCX, RDX, RSI,
                                             R8, R9, R10, R11)>;

// CSRs that are handled by the prologue/epilogue.
def CSR_64_CXX_TLS_Darwin_PE : CalleeSavedRegs<(add RBP)>;

// CSRs that are handled explicitly via copies.
def CSR_64_CXX_TLS_Darwin_ViaCopy : CalleeSavedRegs<(sub CSR_64_TLS_Darwin, RBP)>;

// All GPRs - except r11
def CSR_64_RT_MostRegs : CalleeSavedRegs<(add CSR_64, RAX, RCX, RDX, RSI, RDI,
                                              R8, R9, R10, RSP)>;

// All registers - except r11
def CSR_64_RT_AllRegs     : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
                                                 (sequence "XMM%u", 0, 15))>;
def CSR_64_RT_AllRegs_AVX : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
                                                 (sequence "YMM%u", 0, 15))>;

def CSR_64_MostRegs : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
                                           R11, R12, R13, R14, R15, RBP,
                                           (sequence "XMM%u", 0, 15))>;

def CSR_32_AllRegs     : CalleeSavedRegs<(add EAX, EBX, ECX, EDX, EBP, ESI,
                                              EDI)>;
def CSR_32_AllRegs_SSE : CalleeSavedRegs<(add CSR_32_AllRegs,
                                              (sequence "XMM%u", 0, 7))>;
def CSR_32_AllRegs_AVX : CalleeSavedRegs<(add CSR_32_AllRegs,
                                              (sequence "YMM%u", 0, 7))>;
def CSR_32_AllRegs_AVX512 : CalleeSavedRegs<(add CSR_32_AllRegs,
                                                 (sequence "ZMM%u", 0, 7),
                                                 (sequence "K%u", 0, 7))>;

def CSR_64_AllRegs     : CalleeSavedRegs<(add CSR_64_MostRegs, RAX)>;
def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
                                                   (sequence "YMM%u", 0, 15)),
                                              (sequence "XMM%u", 0, 15))>;
def CSR_64_AllRegs_AVX512 : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
                                                      (sequence "ZMM%u", 0, 31),
                                                      (sequence "K%u", 0, 7)),
                                                 (sequence "XMM%u", 0, 15))>;

// Standard C + YMM6-15
def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
                                                  R13, R14, R15,
                                                  (sequence "YMM%u", 6, 15))>;

def CSR_Win64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI,
                                                     R12, R13, R14, R15,
                                                     (sequence "ZMM%u", 6, 21),
                                                     K4, K5, K6, K7)>;
// Standard C + XMM 8-15
def CSR_64_Intel_OCL_BI       : CalleeSavedRegs<(add CSR_64,
                                                 (sequence "XMM%u", 8, 15))>;

// Standard C + YMM 8-15
def CSR_64_Intel_OCL_BI_AVX    : CalleeSavedRegs<(add CSR_64,
                                                  (sequence "YMM%u", 8, 15))>;

def CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RDI, RSI, R14, R15,
                                                  (sequence "ZMM%u", 16, 31),
                                                  K4, K5, K6, K7)>;

// Only R12 is preserved for PHP calls in HHVM.
def CSR_64_HHVM : CalleeSavedRegs<(add R12)>;
    934