//===-- X86CallingConv.td - Calling Conventions X86 32/64 --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the X86-32 and X86-64
// architectures.
//
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
 : CCIf<!strconcat("State.getTarget().getSubtarget<X86Subtarget>().", F), A>;

//===----------------------------------------------------------------------===//
// Return Value Calling Conventions
//===----------------------------------------------------------------------===//

// Return-value conventions common to all X86 CC's.
def RetCC_X86Common : CallingConv<[
  // Scalar values are returned in AX first, then DX.  For i8, the ABI
  // requires the values to be in AL and AH; however, this code uses AL and DL
  // instead. This is because using AH for the second register conflicts with
  // the way LLVM does multiple return values -- a return of {i16,i8} would end
  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
  // for functions that return two i8 values are currently expected to pack the
  // values into an i16 (which uses AX, and thus AL:AH).
  //
  // For code that doesn't care about the ABI, we allow returning more than two
  // integer values in registers.
  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,

  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
  // can only be used by ABI non-compliant code. If the target doesn't have XMM
  // registers, it won't have vector types.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit vectors are returned in YMM0 and YMM1, when they fit.  YMM2 and
  // YMM3 can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX target feature.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // MMX vector types are always returned in MM0. If the target doesn't have
  // MM0, it doesn't support these vector types.
  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,

  // Long double types are always returned in ST0 (even with SSE).
  CCIfType<[f80], CCAssignToReg<[ST0, ST1]>>
]>;
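
// For example, a front-end returning two i8 values in an ABI-conformant way
// packs them into a single i16, e.g. (hi << 8) | lo; RetCC_X86Common then
// assigns that i16 to AX, so the low byte ends up in AL and the high byte in
// AH, as the ABI requires.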

// X86-32 C return-value convention.
def RetCC_X86_32_C : CallingConv<[
  // The X86-32 calling convention returns FP values in ST0, unless marked
  // with "inreg" (used here to distinguish one kind of reg from another,
  // weirdly; this is really the sse-regparm calling convention) in which
  // case they use XMM0, otherwise it is the same as the common X86 calling
  // conv.
  CCIfInReg<CCIfSubtarget<"hasSSE2()",
    CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
  CCIfType<[f32,f64], CCAssignToReg<[ST0, ST1]>>,
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-32 FastCC return-value convention.
def RetCC_X86_32_Fast : CallingConv<[
  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
  // SSE2.
  // This can happen when a float, 2 x float, or 3 x float vector is split by
  // target lowering, and is returned in 1-3 sse regs.
  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,

  // For integers, ECX can be used as an extra return register.
  CCIfType<[i8],  CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,

  // Otherwise, it is the same as the common X86 calling convention.
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-64 C return-value convention.
def RetCC_X86_64_C : CallingConv<[
  // The X86-64 calling convention always returns FP values in XMM0.
  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,

  // MMX vector types are always returned in XMM0.
  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-Win64 C return-value convention.
def RetCC_X86_Win64_C : CallingConv<[
  // The X86-Win64 calling convention always returns __m64 values in RAX.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // Otherwise, everything is the same as 'normal' X86-64 C CC.
  CCDelegateTo<RetCC_X86_64_C>
]>;

// This is the root return-value convention for the X86-32 backend.
def RetCC_X86_32 : CallingConv<[
  // If FastCC, use RetCC_X86_32_Fast.
  CCIfCC<"CallingConv::Fast", CCDelegateTo<RetCC_X86_32_Fast>>,
  // Otherwise, use RetCC_X86_32_C.
  CCDelegateTo<RetCC_X86_32_C>
]>;

// This is the root return-value convention for the X86-64 backend.
def RetCC_X86_64 : CallingConv<[
  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<RetCC_X86_64_C>
]>;

// This is the return-value convention used for the entire X86 backend.
def RetCC_X86 : CallingConv<[
  CCIfSubtarget<"is64Bit()", CCDelegateTo<RetCC_X86_64>>,
  CCDelegateTo<RetCC_X86_32>
]>;

//===----------------------------------------------------------------------===//
// X86-64 Argument Calling Conventions
//===----------------------------------------------------------------------===//

def CC_X86_64_C : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // The first 6 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
  CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,

  // The first 8 MMX vector arguments are passed in XMM registers on Darwin.
  CCIfType<[x86mmx],
           CCIfSubtarget<"isTargetDarwin()",
           CCIfSubtarget<"hasSSE2()",
           CCPromoteToType<v2i64>>>>,

  // The first 8 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCIfSubtarget<"hasSSE1()",
           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,

  // The first 8 256-bit vector arguments are passed in YMM registers, unless
  // this is a vararg function.
  // FIXME: This isn't precisely correct; the x86-64 ABI document says that
  // fixed arguments to vararg functions are supposed to be passed in
  // registers.  Actually modeling that would be a lot of work, though.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                         CCIfSubtarget<"hasAVX()",
                         CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
                                        YMM4, YMM5, YMM6, YMM7]>>>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depend on the
  // subtarget.
  CCIfType<[f80], CCAssignToStack<0, 0>>,

  // Vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>
]>;
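
// Illustrative example (a sketch of how the rules above compose, not a full
// restatement of the ABI): for a non-vararg call such as
//     void f(int a, double b, long c, float d);
// the assignments are a -> EDI, b -> XMM0, c -> RSI, d -> XMM1, since the
// integer and SSE register sequences are consumed independently and EDI/RDI
// alias the same physical register.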

// Calling convention used on Win64
def CC_X86_Win64_C : CallingConv<[
  // FIXME: Handle byval stuff.
  // FIXME: Handle varargs.

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // 128-bit vectors are passed by pointer.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,

  // 256-bit vectors are passed by pointer.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>,

  // The first 4 MMX vector arguments are passed in GPRs.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // The first 4 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // Do not pass the sret argument in RCX; the Win64 thiscall calling
  // convention requires "this" to be passed in RCX.
  CCIfCC<"CallingConv::X86_ThisCall",
    CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[RDX , R8 , R9 ],
                                                     [XMM1, XMM2, XMM3]>>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[RCX , RDX , R8 , R9 ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // The first 4 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToRegWithShadow<[XMM0, XMM1, XMM2, XMM3],
                                   [RCX , RDX , R8 , R9 ]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depend on the
  // subtarget.
  CCIfType<[f80], CCAssignToStack<0, 0>>
]>;

def CC_X86_64_GHC : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64],
           CCAssignToReg<[R13, RBP, R12, RBX, R14, RSI, RDI, R8, R9, R15]>>,

  // Pass in STG registers: F1, F2, F3, F4, D1, D2
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCIfSubtarget<"hasSSE1()",
           CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>
]>;

//===----------------------------------------------------------------------===//
// X86 C Calling Convention
//===----------------------------------------------------------------------===//

/// CC_X86_32_Common - In all X86-32 calling conventions, extra integers and FP
/// values are spilled on the stack, and the first 4 vector values go in XMM
/// regs.
def CC_X86_32_Common : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<4, 4>>,

  // The first 3 float or double arguments, if marked 'inreg' and if the call
  // is not a vararg call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,

  // The first 3 __m64 vector arguments are passed in mmx registers if the
  // call is not a vararg call.
  CCIfNotVarArg<CCIfType<[x86mmx],
                CCAssignToReg<[MM0, MM1, MM2]>>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Doubles get 8-byte slots that are 4-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 4>>,

  // Long doubles get slots whose size depends on the subtarget.
  CCIfType<[f80], CCAssignToStack<0, 4>>,

  // The first 4 SSE vector arguments are passed in XMM registers.
  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,

  // The first 4 AVX 256-bit vector arguments are passed in YMM registers.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                CCIfSubtarget<"hasAVX()",
                CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,

  // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
  // passed in the parameter area.
  CCIfType<[x86mmx], CCAssignToStack<8, 4>>]>;

def CC_X86_32_C : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in ECX.
  CCIfNest<CCAssignToReg<[ECX]>>,

  // The first 3 integer arguments, if marked 'inreg' and if the call is not
  // a vararg call, are passed in integer registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_FastCall : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
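
// Illustrative example (sketch): for a __fastcall function such as
//     void f(int a, int b, int c);
// a is passed in ECX, b in EDX, and c falls through to CC_X86_32_Common,
// which gives it a 4-byte, 4-byte-aligned stack slot.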

def CC_X86_32_ThisCall : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass sret arguments indirectly through EAX
  CCIfSRet<CCAssignToReg<[EAX]>>,

  // The first integer argument is passed in ECX
  CCIfType<[i32], CCAssignToReg<[ECX]>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_FastCC : CallingConv<[
  // Handles byval parameters.  Note that we can't rely on the delegation
  // to CC_X86_32_Common for this because that happens after code that
  // puts arguments in registers.
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,

  // The first 3 float or double arguments, if the call is not a vararg
  // call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,

  // Doubles get 8-byte slots that are 8-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 8>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_GHC : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in STG registers: Base, Sp, Hp, R1
  CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
]>;

//===----------------------------------------------------------------------===//
// X86 Root Argument Calling Conventions
//===----------------------------------------------------------------------===//

// This is the root argument convention for the X86-32 backend.
def CC_X86_32 : CallingConv<[
  CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
  CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
  CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,

  // Otherwise, drop to normal X86-32 CC
  CCDelegateTo<CC_X86_32_C>
]>;

// This is the root argument convention for the X86-64 backend.
def CC_X86_64 : CallingConv<[
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<CC_X86_64_C>
]>;

// This is the argument convention used for the entire X86 backend.
def CC_X86 : CallingConv<[
  CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
  CCDelegateTo<CC_X86_32>
]>;

//===----------------------------------------------------------------------===//
// Callee-saved Registers.
//===----------------------------------------------------------------------===//

def CSR_NoRegs : CalleeSavedRegs<(add)>;

def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;

def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;

def CSR_Win64 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15,
                                     (sequence "XMM%u", 6, 15))>;