//===-- X86CallingConv.td - Calling Conventions X86 32/64 --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the X86-32 and X86-64
// architectures.
//
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
    : CCIf<!strconcat("static_cast<const X86Subtarget&>"
                       "(State.getMachineFunction().getSubtarget()).", F),
           A>;

/// CCIfNotSubtarget - Match if the current subtarget doesn't have a feature F.
class CCIfNotSubtarget<string F, CCAction A>
    : CCIf<!strconcat("!static_cast<const X86Subtarget&>"
                       "(State.getMachineFunction().getSubtarget()).", F),
           A>;
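
// For example, CCIfSubtarget<"hasSSE1()", ...> expands to a match whose
// predicate calls X86Subtarget::hasSSE1() on the subtarget of the function
// currently being lowered; the wrapped action only applies when that
// predicate is true.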

/// CCIfIsVarArgOnWin - Match if the current function is variadic and the
/// target is 32-bit Windows (MSVC environment).
class CCIfIsVarArgOnWin<CCAction A>
    : CCIf<"State.isVarArg() && "
           "State.getMachineFunction().getSubtarget().getTargetTriple()."
           "isWindowsMSVCEnvironment()",
           A>;

// Register classes for RegCall
class RC_X86_RegCall {
  list<Register> GPR_8 = [];
  list<Register> GPR_16 = [];
  list<Register> GPR_32 = [];
  list<Register> GPR_64 = [];
  list<Register> FP_CALL = [FP0];
  list<Register> FP_RET = [FP0, FP1];
  list<Register> XMM = [];
  list<Register> YMM = [];
  list<Register> ZMM = [];
}

// RegCall register classes for 32 bits
def RC_X86_32_RegCall : RC_X86_RegCall {
  let GPR_8 = [AL, CL, DL, DIL, SIL];
  let GPR_16 = [AX, CX, DX, DI, SI];
  let GPR_32 = [EAX, ECX, EDX, EDI, ESI];
  let GPR_64 = [RAX]; ///< Not actually used, but AssignToReg can't handle []
                      ///< \todo Fix AssignToReg to enable empty lists
  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7];
  let YMM = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7];
  let ZMM = [ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7];
}

class RC_X86_64_RegCall : RC_X86_RegCall {
  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
             XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15];
  let YMM = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
             YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15];
  let ZMM = [ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7,
             ZMM8, ZMM9, ZMM10, ZMM11, ZMM12, ZMM13, ZMM14, ZMM15];
}

def RC_X86_64_RegCall_Win : RC_X86_64_RegCall {
  let GPR_8 = [AL, CL, DL, DIL, SIL, R8B, R9B, R10B, R11B, R12B, R14B, R15B];
  let GPR_16 = [AX, CX, DX, DI, SI, R8W, R9W, R10W, R11W, R12W, R14W, R15W];
  let GPR_32 = [EAX, ECX, EDX, EDI, ESI, R8D, R9D, R10D, R11D, R12D, R14D, R15D];
  let GPR_64 = [RAX, RCX, RDX, RDI, RSI, R8, R9, R10, R11, R12, R14, R15];
}

def RC_X86_64_RegCall_SysV : RC_X86_64_RegCall {
  let GPR_8 = [AL, CL, DL, DIL, SIL, R8B, R9B, R12B, R13B, R14B, R15B];
  let GPR_16 = [AX, CX, DX, DI, SI, R8W, R9W, R12W, R13W, R14W, R15W];
  let GPR_32 = [EAX, ECX, EDX, EDI, ESI, R8D, R9D, R12D, R13D, R14D, R15D];
  let GPR_64 = [RAX, RCX, RDX, RDI, RSI, R8, R9, R12, R13, R14, R15];
}
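
// Note that the two 64-bit GPR pools differ: the Win64 variant includes
// R10/R11 but not R13, while the SysV variant includes R13 but not R10/R11
// (on SysV targets, R10 also carries the 'nest' parameter, see CC_X86_64_C
// below, and R11 is an ABI scratch register).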

// Intel regcall calling convention (base shared by the X86-32 and X86-64
// variants instantiated below).
multiclass X86_RegCall_base<RC_X86_RegCall RC> {
def CC_#NAME : CallingConv<[
    // Handles byval parameters.
    CCIfSubtarget<"is64Bit()", CCIfByVal<CCPassByVal<8, 8>>>,
    CCIfByVal<CCPassByVal<4, 4>>,

    // Promote i1/i8/i16/v1i1 arguments to i32.
    CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

    // Promote v8i1/v16i1/v32i1 arguments to i32.
    CCIfType<[v8i1, v16i1, v32i1], CCPromoteToType<i32>>,

    // bool, char, int, enum, long, pointer --> GPR
    CCIfType<[i32], CCAssignToReg<RC.GPR_32>>,

    // long long, __int64 --> GPR
    CCIfType<[i64], CCAssignToReg<RC.GPR_64>>,

    // __mmask64 (v64i1) --> GPR64 (for x64) or 2 x GPR32 (for IA32)
    CCIfType<[v64i1], CCPromoteToType<i64>>,
    CCIfSubtarget<"is64Bit()", CCIfType<[i64],
      CCAssignToReg<RC.GPR_64>>>,
    CCIfSubtarget<"is32Bit()", CCIfType<[i64],
      CCCustom<"CC_X86_32_RegCall_Assign2Regs">>>,

    // float, double, float128 --> XMM
    // If SSE is disabled --> save to stack
    CCIfType<[f32, f64, f128],
      CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,

    // long double --> FP
    CCIfType<[f80], CCAssignToReg<RC.FP_CALL>>,

    // __m128, __m128i, __m128d --> XMM
    // If SSE is disabled --> save to stack
    CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
      CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,

    // __m256, __m256i, __m256d --> YMM
    // If AVX is disabled --> save to stack
    CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
      CCIfSubtarget<"hasAVX()", CCAssignToReg<RC.YMM>>>,

    // __m512, __m512i, __m512d --> ZMM
    // If AVX-512 is disabled --> save to stack
    CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      CCIfSubtarget<"hasAVX512()", CCAssignToReg<RC.ZMM>>>,

    // If no register was found --> assign to stack

    // In 64-bit mode, assign 64/32-bit values to 8-byte stack slots.
    CCIfSubtarget<"is64Bit()", CCIfType<[i32, i64, f32, f64],
      CCAssignToStack<8, 8>>>,

    // In 32-bit mode, assign 64/32-bit values to 8/4-byte stack slots.
    CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
    CCIfType<[i64, f64], CCAssignToStack<8, 4>>,

    // MMX types get an 8-byte stack slot, while the alignment depends on the
    // target.
    CCIfSubtarget<"is64Bit()", CCIfType<[x86mmx], CCAssignToStack<8, 8>>>,
    CCIfType<[x86mmx], CCAssignToStack<8, 4>>,

    // f80 and f128 get stack slots whose size and alignment depend on the
    // subtarget.
    CCIfType<[f80, f128], CCAssignToStack<0, 0>>,

    // Vectors get 16-byte stack slots that are 16-byte aligned.
    CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
      CCAssignToStack<16, 16>>,

    // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
    CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
      CCAssignToStack<32, 32>>,

    // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
    CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      CCAssignToStack<64, 64>>
]>;
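
// For illustration: under RC_X86_32_RegCall, a __regcall function taking four
// ints receives them in EAX, ECX, EDX and EDI, in that order, following the
// GPR_32 list above; further integer arguments spill to the stack rules below.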

def RetCC_#NAME : CallingConv<[
    // Promote i1, v1i1, v8i1 arguments to i8.
    CCIfType<[i1, v1i1, v8i1], CCPromoteToType<i8>>,

    // Promote v16i1 arguments to i16.
    CCIfType<[v16i1], CCPromoteToType<i16>>,

    // Promote v32i1 arguments to i32.
    CCIfType<[v32i1], CCPromoteToType<i32>>,

    // bool, char, int, enum, long, pointer --> GPR
    CCIfType<[i8], CCAssignToReg<RC.GPR_8>>,
    CCIfType<[i16], CCAssignToReg<RC.GPR_16>>,
    CCIfType<[i32], CCAssignToReg<RC.GPR_32>>,

    // long long, __int64 --> GPR
    CCIfType<[i64], CCAssignToReg<RC.GPR_64>>,

    // __mmask64 (v64i1) --> GPR64 (for x64) or 2 x GPR32 (for IA32)
    CCIfType<[v64i1], CCPromoteToType<i64>>,
    CCIfSubtarget<"is64Bit()", CCIfType<[i64],
      CCAssignToReg<RC.GPR_64>>>,
    CCIfSubtarget<"is32Bit()", CCIfType<[i64],
      CCCustom<"CC_X86_32_RegCall_Assign2Regs">>>,

    // long double --> FP
    CCIfType<[f80], CCAssignToReg<RC.FP_RET>>,

    // float, double, float128 --> XMM
    CCIfType<[f32, f64, f128],
      CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,

    // __m128, __m128i, __m128d --> XMM
    CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
      CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,

    // __m256, __m256i, __m256d --> YMM
    CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
      CCIfSubtarget<"hasAVX()", CCAssignToReg<RC.YMM>>>,

    // __m512, __m512i, __m512d --> ZMM
    CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      CCIfSubtarget<"hasAVX512()", CCAssignToReg<RC.ZMM>>>
]>;
}

//===----------------------------------------------------------------------===//
// Return Value Calling Conventions
//===----------------------------------------------------------------------===//

// Return-value conventions common to all X86 CC's.
def RetCC_X86Common : CallingConv<[
  // Scalar values are returned in AX first, then DX.  For i8, the ABI
  // requires the values to be in AL and AH; however, this code uses AL and DL
  // instead. This is because using AH for the second register conflicts with
  // the way LLVM does multiple return values -- a return of {i16,i8} would end
  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
  // for functions that return two i8 values are currently expected to pack the
  // values into an i16 (which uses AX, and thus AL:AH).
  //
  // For code that doesn't care about the ABI, we allow returning more than two
  // integer values in registers.
  CCIfType<[v1i1],  CCPromoteToType<i8>>,
  CCIfType<[i1],  CCPromoteToType<i8>>,
  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,

  // Boolean vectors of AVX-512 are returned in SIMD registers.
  // Calls from AVX code into AVX-512 functions should work, since the boolean
  // types in AVX/AVX2 are promoted by default.
  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
  CCIfType<[v64i1], CCPromoteToType<v64i8>>,

  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
  // can only be used by ABI non-compliant code. If the target doesn't have XMM
  // registers, it won't have vector types.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit vectors are returned in YMM0 and YMM1, when they fit. YMM2 and YMM3
  // can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX target feature.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
  // can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX-512 target feature.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // MMX vector types are always returned in MM0. If the target doesn't have
  // MM0, it doesn't support these vector types.
  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,

  // Long double types are always returned in FP0 (even with SSE),
  // except on Win64.
  CCIfNotSubtarget<"isTargetWin64()", CCIfType<[f80], CCAssignToReg<[FP0, FP1]>>>
]>;
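
// For example, a function returning {i32, i32} uses EAX and EDX, an i64
// return on x86-64 uses RAX, and a single __m128 return value uses XMM0.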

// X86-32 C return-value convention.
def RetCC_X86_32_C : CallingConv<[
  // The X86-32 calling convention returns FP values in FP0, unless marked
  // with "inreg" (used here to distinguish one kind of reg from another,
  // weirdly; this is really the sse-regparm calling convention), in which
  // case they use XMM0. Otherwise it is the same as the common X86 calling
  // convention.
  CCIfInReg<CCIfSubtarget<"hasSSE2()",
    CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
  CCIfSubtarget<"hasX87()",
    CCIfType<[f32, f64], CCAssignToReg<[FP0, FP1]>>>,
  CCIfNotSubtarget<"hasX87()",
    CCIfType<[f32], CCAssignToReg<[EAX, EDX, ECX]>>>,
  CCIfType<[f16], CCAssignToReg<[XMM0,XMM1,XMM2]>>,
  CCDelegateTo<RetCC_X86Common>
]>;
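
// For example, 'float f(void)' compiled for i386 normally returns in FP0
// (the x87 st(0) register); with the sse-regparm/"inreg" form and SSE2
// available it returns in XMM0 instead, and on a target without x87 the f32
// bits come back in EAX.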

// X86-32 FastCC return-value convention.
def RetCC_X86_32_Fast : CallingConv<[
  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
  // SSE2.
  // This can happen when a float, 2 x float, or 3 x float vector is split by
  // target lowering, and is returned in 1-3 SSE regs.
  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,

  // For integers, ECX can be used as an extra return register.
  CCIfType<[i8],  CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,

  // Otherwise, it is the same as the common X86 calling convention.
  CCDelegateTo<RetCC_X86Common>
]>;

// Intel_OCL_BI return-value convention.
def RetCC_Intel_OCL_BI : CallingConv<[
  // Vector types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit FP vectors
  // No more than 4 registers
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit FP vectors
  CCIfType<[v16f32, v8f64, v16i32, v8i64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // i32, i64 in the standard way
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-32 HiPE return-value convention.
def RetCC_X86_32_HiPE : CallingConv<[
  // Promote all types to i32
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Return: HP, P, VAL1, VAL2
  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX]>>
]>;

// X86-32 Vectorcall return-value convention.
def RetCC_X86_32_VectorCall : CallingConv<[
  // Floating point types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[f32, f64, f128],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // Return integers in the standard way.
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-64 C return-value convention.
def RetCC_X86_64_C : CallingConv<[
  // The X86-64 calling convention always returns FP values in XMM0.
  CCIfType<[f16], CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f128], CCAssignToReg<[XMM0, XMM1]>>,

  // MMX vector types are always returned in XMM0.
  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,

  // Pointers are always returned in full 64-bit registers.
  CCIfPtr<CCCustom<"CC_X86_64_Pointer">>,

  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

  CCDelegateTo<RetCC_X86Common>
]>;

// X86-Win64 C return-value convention.
def RetCC_X86_Win64_C : CallingConv<[
  // The X86-Win64 calling convention always returns __m64 values in RAX.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // GCC returns FP values in RAX on Win64.
  CCIfType<[f32], CCIfNotSubtarget<"hasSSE1()", CCBitConvertToType<i32>>>,
  CCIfType<[f64], CCIfNotSubtarget<"hasSSE1()", CCBitConvertToType<i64>>>,

  // Otherwise, everything is the same as 'normal' X86-64 C CC.
  CCDelegateTo<RetCC_X86_64_C>
]>;
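
// For example, when Win64 code is built without SSE, a 'double' return value
// is bit-converted to i64 above and therefore comes back in RAX via the
// common integer return rules.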

// X86-64 vectorcall return-value convention.
def RetCC_X86_64_Vectorcall : CallingConv<[
  // Vectorcall calling convention always returns FP values in XMMs.
  CCIfType<[f32, f64, f128],
    CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,

  // Otherwise, everything is the same as Windows X86-64 C CC.
  CCDelegateTo<RetCC_X86_Win64_C>
]>;

// X86-64 HiPE return-value convention.
def RetCC_X86_64_HiPE : CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: HP, P, VAL1, VAL2
  CCIfType<[i64], CCAssignToReg<[R15, RBP, RAX, RDX]>>
]>;

// X86-64 WebKit_JS return-value convention.
def RetCC_X86_64_WebKit_JS : CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: RAX
  CCIfType<[i64], CCAssignToReg<[RAX]>>
]>;

def RetCC_X86_64_Swift : CallingConv<[

  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

  // For integers, ECX and R8D can be used as extra return registers.
  CCIfType<[v1i1],  CCPromoteToType<i8>>,
  CCIfType<[i1],  CCPromoteToType<i8>>,
  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL, R8B]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX, R8W]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX, R8D]>>,
  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX, R8]>>,

  // XMM0, XMM1, XMM2 and XMM3 can be used to return FP values.
  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
  CCIfType<[f128], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,

  // MMX vector types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-64 AnyReg return-value convention. No explicit register is specified for
// the return-value. The register allocator is allowed and expected to choose
// any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fall back to the X86 C calling convention.
def RetCC_X86_64_AnyReg : CallingConv<[
  CCCustom<"CC_X86_AnyReg_Error">
]>;


defm X86_32_RegCall :
     X86_RegCall_base<RC_X86_32_RegCall>;
defm X86_Win64_RegCall :
     X86_RegCall_base<RC_X86_64_RegCall_Win>;
defm X86_SysV64_RegCall :
     X86_RegCall_base<RC_X86_64_RegCall_SysV>;

// This is the root return-value convention for the X86-32 backend.
def RetCC_X86_32 : CallingConv<[
  // If FastCC, use RetCC_X86_32_Fast.
  CCIfCC<"CallingConv::Fast", CCDelegateTo<RetCC_X86_32_Fast>>,
  CCIfCC<"CallingConv::Tail", CCDelegateTo<RetCC_X86_32_Fast>>,
  // CFGuard_Check never returns a value, so it does not need a RetCC.
  // If HiPE, use RetCC_X86_32_HiPE.
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_32_HiPE>>,
  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_32_VectorCall>>,
  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<RetCC_X86_32_RegCall>>,

  // Otherwise, use RetCC_X86_32_C.
  CCDelegateTo<RetCC_X86_32_C>
]>;

// This is the root return-value convention for the X86-64 backend.
def RetCC_X86_64 : CallingConv<[
  // HiPE uses RetCC_X86_64_HiPE
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,

  // Handle JavaScript calls.
  CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<RetCC_X86_64_WebKit_JS>>,
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,

  // Handle Swift calls.
  CCIfCC<"CallingConv::Swift", CCDelegateTo<RetCC_X86_64_Swift>>,
  CCIfCC<"CallingConv::SwiftTail", CCDelegateTo<RetCC_X86_64_Swift>>,

  // Handle explicit CC selection
  CCIfCC<"CallingConv::Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,

  // Handle Vectorcall CC
  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_64_Vectorcall>>,

  CCIfCC<"CallingConv::X86_RegCall",
          CCIfSubtarget<"isTargetWin64()",
                        CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<RetCC_X86_SysV64_RegCall>>,

  // MinGW-w64 and native Win64 use the Win64 CC.
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<RetCC_X86_64_C>
]>;

// This is the return-value convention used for the entire X86 backend.
let Entry = 1 in
def RetCC_X86 : CallingConv<[

  // Check if this is the Intel OpenCL built-ins calling convention
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<RetCC_Intel_OCL_BI>>,

  CCIfSubtarget<"is64Bit()", CCDelegateTo<RetCC_X86_64>>,
  CCDelegateTo<RetCC_X86_32>
]>;

//===----------------------------------------------------------------------===//
// X86-64 Argument Calling Conventions
//===----------------------------------------------------------------------===//

def CC_X86_64_C : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCIfSubtarget<"isTarget64BitILP32()", CCAssignToReg<[R10D]>>>,
  CCIfNest<CCAssignToReg<[R10]>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[R13]>>>,

  // A SwiftError is passed in R12.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

  // Pass SwiftAsync in an otherwise callee saved register so that calls to
  // normal functions don't need to save it somewhere.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[R14]>>>,

  // For Swift calling conventions, pass sret in %rax.
  CCIfCC<"CallingConv::Swift",
    CCIfSRet<CCIfType<[i64], CCAssignToReg<[RAX]>>>>,
  CCIfCC<"CallingConv::SwiftTail",
    CCIfSRet<CCIfType<[i64], CCAssignToReg<[RAX]>>>>,

  // Pointers are always passed in full 64-bit registers.
  CCIfPtr<CCCustom<"CC_X86_64_Pointer">>,

  // The first 6 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
  CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,

  // The first 8 MMX vector arguments are passed in XMM registers on Darwin.
  CCIfType<[x86mmx],
            CCIfSubtarget<"isTargetDarwin()",
            CCIfSubtarget<"hasSSE2()",
            CCPromoteToType<v2i64>>>>,

  // Boolean vectors of AVX-512 are passed in SIMD registers.
  // Calls from AVX code into AVX-512 functions should work, since the boolean
  // types in AVX/AVX2 are promoted by default.
  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
  CCIfType<[v64i1], CCPromoteToType<v64i8>>,

  // The first 8 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f16, f32, f64, f128, v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,

  // The first 8 256-bit vector arguments are passed in YMM registers, unless
  // this is a vararg function.
  // FIXME: This isn't precisely correct; the x86-64 ABI document says that
  // fixed arguments to vararg functions are supposed to be passed in
  // registers.  Actually modeling that would be a lot of work, though.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
                          CCIfSubtarget<"hasAVX()",
                          CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
                                         YMM4, YMM5, YMM6, YMM7]>>>>,

  // The first 8 512-bit vector arguments are passed in ZMM registers.
  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
            CCIfSubtarget<"hasAVX512()",
            CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f16, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depend on the
  // subtarget.
  CCIfType<[f80, f128], CCAssignToStack<0, 0>>,

  // Vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
           CCAssignToStack<64, 64>>
]>;
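
// For example, for 'void f(int a, double b, int c)' under this convention,
// 'a' goes in EDI and 'c' in ESI (the first two integer slots), while 'b'
// goes in XMM0 (the first FP slot); integer and FP arguments consume
// separate register sequences.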

// Calling convention used on Win64
def CC_X86_Win64_C : CallingConv<[
  // FIXME: Handle varargs.

  // Byval aggregates are passed by pointer
  CCIfByVal<CCPassIndirect<i64>>,

  // Promote i1/v1i1 arguments to i8.
  CCIfType<[i1, v1i1], CCPromoteToType<i8>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // A SwiftError is passed in R12.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[R13]>>>,

  // Pass SwiftAsync in an otherwise callee saved register so that calls to
  // normal functions don't need to save it somewhere.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[R14]>>>,

  // The 'CFGuardTarget' parameter, if any, is passed in RAX.
  CCIfCFGuardTarget<CCAssignToReg<[RAX]>>,

  // 128-bit vectors are passed by pointer
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCPassIndirect<i64>>,

  // 256-bit vectors are passed by pointer
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64], CCPassIndirect<i64>>,

  // 512-bit vectors are passed by pointer
  CCIfType<[v64i8, v32i16, v16i32, v32f16, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,

  // Long doubles are passed by pointer
  CCIfType<[f80], CCPassIndirect<i64>>,

  // The first 4 MMX vector arguments are passed in GPRs.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // If SSE is disabled, pass FP values of 64 bits or smaller as integers in
  // GPRs or on the stack.
  CCIfType<[f32], CCIfNotSubtarget<"hasSSE1()", CCBitConvertToType<i32>>>,
  CCIfType<[f64], CCIfNotSubtarget<"hasSSE1()", CCBitConvertToType<i64>>>,

  // The first 4 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f16, f32, f64],
           CCAssignToRegWithShadow<[XMM0, XMM1, XMM2, XMM3],
                                   [RCX , RDX , R8  , R9  ]>>,

  // The first 4 integer arguments are passed in integer registers.
  CCIfType<[i8 ], CCAssignToRegWithShadow<[CL  , DL  , R8B , R9B ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,
  CCIfType<[i16], CCAssignToRegWithShadow<[CX  , DX  , R8W , R9W ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // Do not pass the sret argument in RCX; the Win64 thiscall calling
  // convention requires "this" to be passed in RCX.
  CCIfCC<"CallingConv::X86_ThisCall",
    CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[RDX , R8  , R9  ],
                                                     [XMM1, XMM2, XMM3]>>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[RCX , RDX , R8  , R9  ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i8, i16, i32, i64, f16, f32, f64], CCAssignToStack<8, 8>>
]>;
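
// For example, for 'void f(int a, double b)' on Win64, 'a' is passed in ECX
// and 'b' in XMM1: the shadow register lists above make each argument consume
// one of the four parameter slots regardless of its class, as the Win64 ABI
// requires.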

def CC_X86_Win64_VectorCall : CallingConv<[
  CCCustom<"CC_X86_64_VectorCall">,

  // Delegate to fastcall to handle integer types.
  CCDelegateTo<CC_X86_Win64_C>
]>;


def CC_X86_64_GHC : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64],
            CCAssignToReg<[R13, RBP, R12, RBX, R14, RSI, RDI, R8, R9, R15]>>,

  // Pass in STG registers: F1, F2, F3, F4, D1, D2
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>,
  // AVX
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
            CCIfSubtarget<"hasAVX()",
            CCAssignToReg<[YMM1, YMM2, YMM3, YMM4, YMM5, YMM6]>>>,
  // AVX-512
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
            CCIfSubtarget<"hasAVX512()",
            CCAssignToReg<[ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6]>>>
]>;

def CC_X86_64_HiPE : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2, ARG3
  CCIfType<[i64], CCAssignToReg<[R15, RBP, RSI, RDX, RCX, R8]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
]>;

def CC_X86_64_WebKit_JS : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Only the first integer argument is passed in a register.
  CCIfType<[i32], CCAssignToReg<[EAX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX]>>,

  // The remaining integer arguments are passed on the stack. 32-bit integer
  // and floating-point arguments are aligned to 4 bytes and stored in 4-byte
  // slots. 64-bit integer and floating-point arguments are aligned to 8 bytes
  // and stored in 8-byte stack slots.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;
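
// For example, for a WebKit_JS call taking two i32 arguments, the first is
// passed in EAX and the second goes to a 4-byte stack slot.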

// No explicit register is specified for the AnyReg calling convention. The
// register allocator may assign the arguments to any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fall back to the X86 C calling convention.
def CC_X86_64_AnyReg : CallingConv<[
  CCCustom<"CC_X86_AnyReg_Error">
]>;

//===----------------------------------------------------------------------===//
// X86 C Calling Convention
//===----------------------------------------------------------------------===//

/// CC_X86_32_Vector_Common - In all X86-32 calling conventions, extra vector
/// values are spilled on the stack.
def CC_X86_32_Vector_Common : CallingConv<[
  // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
           CCAssignToStack<16, 16>>,

  // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // 512-bit AVX-512 vectors get 64-byte stack slots that are 64-byte aligned.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
           CCAssignToStack<64, 64>>
]>;

/// CC_X86_Win32_Vector - In X86 Win32 calling conventions, extra vector
/// values are spilled on the stack.
def CC_X86_Win32_Vector : CallingConv<[
  // Other SSE vectors get 16-byte stack slots that are 4-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
           CCAssignToStack<16, 4>>,

  // 256-bit AVX vectors get 32-byte stack slots that are 4-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
           CCAssignToStack<32, 4>>,

  // 512-bit AVX-512 vectors get 64-byte stack slots that are 4-byte aligned.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
           CCAssignToStack<64, 4>>
]>;
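
// The Win32 variant only requests 4-byte alignment for these slots,
// presumably because the 32-bit Windows ABI guarantees no more than 4-byte
// stack alignment, so spilled vector arguments cannot rely on natural
// alignment there.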
769349cc55cSDimitry Andric
7700b57cec5SDimitry Andric// CC_X86_32_Vector_Standard - The first 3 vector arguments are passed in
7710b57cec5SDimitry Andric// vector registers
7720b57cec5SDimitry Andricdef CC_X86_32_Vector_Standard : CallingConv<[
7730b57cec5SDimitry Andric  // SSE vector arguments are passed in XMM registers.
774349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
7750b57cec5SDimitry Andric                CCAssignToReg<[XMM0, XMM1, XMM2]>>>,
7760b57cec5SDimitry Andric
7770b57cec5SDimitry Andric  // AVX 256-bit vector arguments are passed in YMM registers.
778349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
7790b57cec5SDimitry Andric                CCIfSubtarget<"hasAVX()",
7800b57cec5SDimitry Andric                CCAssignToReg<[YMM0, YMM1, YMM2]>>>>,
7810b57cec5SDimitry Andric
7820b57cec5SDimitry Andric  // AVX 512-bit vector arguments are passed in ZMM registers.
783349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
7840b57cec5SDimitry Andric                CCAssignToReg<[ZMM0, ZMM1, ZMM2]>>>,
7850b57cec5SDimitry Andric
786349cc55cSDimitry Andric  CCIfIsVarArgOnWin<CCDelegateTo<CC_X86_Win32_Vector>>,
7870b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_Vector_Common>
7880b57cec5SDimitry Andric]>;
7890b57cec5SDimitry Andric
7900b57cec5SDimitry Andric// CC_X86_32_Vector_Darwin - The first 4 vector arguments are passed in
7910b57cec5SDimitry Andric// vector registers.
7920b57cec5SDimitry Andricdef CC_X86_32_Vector_Darwin : CallingConv<[
7930b57cec5SDimitry Andric  // SSE vector arguments are passed in XMM registers.
794349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
7950b57cec5SDimitry Andric                CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,
7960b57cec5SDimitry Andric
7970b57cec5SDimitry Andric  // AVX 256-bit vector arguments are passed in YMM registers.
798349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
7990b57cec5SDimitry Andric                CCIfSubtarget<"hasAVX()",
8000b57cec5SDimitry Andric                CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,
8010b57cec5SDimitry Andric
8020b57cec5SDimitry Andric  // AVX 512-bit vector arguments are passed in ZMM registers.
803349cc55cSDimitry Andric  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
8040b57cec5SDimitry Andric                CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>>,
8050b57cec5SDimitry Andric
8060b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_Vector_Common>
8070b57cec5SDimitry Andric]>;
8080b57cec5SDimitry Andric
8090b57cec5SDimitry Andric/// CC_X86_32_Common - In all X86-32 calling conventions, extra integers and FP
8100b57cec5SDimitry Andric/// values are spilled on the stack.
8110b57cec5SDimitry Andricdef CC_X86_32_Common : CallingConv<[
8125ffd83dbSDimitry Andric  // Handles byval/preallocated parameters.
8130b57cec5SDimitry Andric  CCIfByVal<CCPassByVal<4, 4>>,
8145ffd83dbSDimitry Andric  CCIfPreallocated<CCPassByVal<4, 4>>,
8150b57cec5SDimitry Andric
8160b57cec5SDimitry Andric  // The first 3 float or double arguments, if marked 'inreg' and if the call
8170b57cec5SDimitry Andric  // is not a vararg call and if SSE2 is available, are passed in SSE registers.
8180b57cec5SDimitry Andric  CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
8190b57cec5SDimitry Andric                CCIfSubtarget<"hasSSE2()",
8200b57cec5SDimitry Andric                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,
8210b57cec5SDimitry Andric
822349cc55cSDimitry Andric  CCIfNotVarArg<CCIfInReg<CCIfType<[f16], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
823349cc55cSDimitry Andric
8240b57cec5SDimitry Andric  // The first 3 __m64 vector arguments are passed in mmx registers if the
8250b57cec5SDimitry Andric  // call is not a vararg call.
8260b57cec5SDimitry Andric  CCIfNotVarArg<CCIfType<[x86mmx],
8270b57cec5SDimitry Andric                CCAssignToReg<[MM0, MM1, MM2]>>>,
8280b57cec5SDimitry Andric
829349cc55cSDimitry Andric  CCIfType<[f16], CCAssignToStack<4, 4>>,
830349cc55cSDimitry Andric
8310b57cec5SDimitry Andric  // Integer/Float values get stored in stack slots that are 4 bytes in
8320b57cec5SDimitry Andric  // size and 4-byte aligned.
8330b57cec5SDimitry Andric  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
8340b57cec5SDimitry Andric
8350b57cec5SDimitry Andric  // Doubles get 8-byte slots that are 4-byte aligned.
8360b57cec5SDimitry Andric  CCIfType<[f64], CCAssignToStack<8, 4>>,
8370b57cec5SDimitry Andric
838349cc55cSDimitry Andric  // Long doubles get slots whose size and alignment depends on the subtarget.
839349cc55cSDimitry Andric  CCIfType<[f80], CCAssignToStack<0, 0>>,
8400b57cec5SDimitry Andric
8410b57cec5SDimitry Andric  // Boolean vectors of AVX-512 are passed in SIMD registers.
8420b57cec5SDimitry Andric  // The call from AVX to AVX-512 function should work,
8430b57cec5SDimitry Andric  // since the boolean types in AVX/AVX2 are promoted by default.
8440b57cec5SDimitry Andric  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
8450b57cec5SDimitry Andric  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
8460b57cec5SDimitry Andric  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
8470b57cec5SDimitry Andric  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
8480b57cec5SDimitry Andric  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
8490b57cec5SDimitry Andric  CCIfType<[v64i1], CCPromoteToType<v64i8>>,
8500b57cec5SDimitry Andric
8510b57cec5SDimitry Andric  // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
8520b57cec5SDimitry Andric  // passed in the parameter area.
8530b57cec5SDimitry Andric  CCIfType<[x86mmx], CCAssignToStack<8, 4>>,
8540b57cec5SDimitry Andric
8550b57cec5SDimitry Andric  // Darwin passes vectors in a form that differs from the i386 psABI
8560b57cec5SDimitry Andric  CCIfSubtarget<"isTargetDarwin()", CCDelegateTo<CC_X86_32_Vector_Darwin>>,
8570b57cec5SDimitry Andric
8580b57cec5SDimitry Andric  // Otherwise, drop to 'normal' X86-32 CC
8590b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_Vector_Standard>
8600b57cec5SDimitry Andric]>;

def CC_X86_32_C : CallingConv<[
  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in ECX.
  CCIfNest<CCAssignToReg<[ECX]>>,

  // For swifttailcc, pass swiftself in ECX.
  CCIfCC<"CallingConv::SwiftTail",
         CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[ECX]>>>>,

  // The first 3 integer arguments, if marked 'inreg' and if the call is not
  // a vararg call, are passed in integer registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
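// For illustration, under CC_X86_32_C a non-vararg call such as
// `int f(int a, double b)` should place `a` in a 4-byte stack slot and `b` in
// an 8-byte, 4-byte-aligned slot via CC_X86_32_Common; marking up to three i32
// arguments 'inreg' moves them into EAX, EDX and ECX instead.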

def CC_X86_32_MCU : CallingConv<[
  // Handles byval parameters.  Note that, like FastCC, we can't rely on
  // the delegation to CC_X86_32_Common because that happens after code that
  // puts arguments in registers.
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  // If the call is not a vararg call, some arguments may be passed
  // in integer registers.
  CCIfNotVarArg<CCIfType<[i32], CCCustom<"CC_X86_32_MCUInReg">>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
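// Note: CC_X86_32_MCUInReg is a custom handler implemented in
// X86CallingConv.cpp. Roughly, it hands out EAX, EDX and ECX while keeping a
// value that spans several i32 parts either entirely in registers or entirely
// on the stack, as the IAMCU psABI expects.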

def CC_X86_32_FastCall : CallingConv<[
  // Promote i1 to i8.
  CCIfType<[i1], CCPromoteToType<i8>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfInReg<CCIfType<[ i8], CCAssignToReg<[ CL,  DL]>>>,
  CCIfInReg<CCIfType<[i16], CCAssignToReg<[ CX,  DX]>>>,
  CCIfInReg<CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
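// For illustration, under CC_X86_32_FastCall a call such as
// `int __fastcall f(int a, int b, int c)` should pass `a` in ECX, `b` in EDX,
// and `c` in a 4-byte stack slot via CC_X86_32_Common; i8/i16 'inreg'
// arguments use the CL/DL and CX/DX subregisters instead.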

def CC_X86_Win32_VectorCall : CallingConv<[
  // Pass floating point in XMMs
  CCCustom<"CC_X86_32_VectorCall">,

  // Delegate to fastcall to handle integer types.
  CCDelegateTo<CC_X86_32_FastCall>
]>;
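// For illustration, __vectorcall is expected to pass the first floating-point
// and vector arguments in XMM0-XMM5 (handled by the custom
// CC_X86_32_VectorCall routine in X86CallingConv.cpp), while integer arguments
// follow the fastcall rules above.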

def CC_X86_32_ThisCall_Common : CallingConv<[
  // The first integer argument is passed in ECX
  CCIfType<[i32], CCAssignToReg<[ECX]>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_ThisCall_Mingw : CallingConv<[
  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  CCDelegateTo<CC_X86_32_ThisCall_Common>
]>;

def CC_X86_32_ThisCall_Win : CallingConv<[
  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  // Pass sret arguments indirectly through the stack.
  CCIfSRet<CCAssignToStack<4, 4>>,

  CCDelegateTo<CC_X86_32_ThisCall_Common>
]>;

def CC_X86_32_ThisCall : CallingConv<[
  CCIfSubtarget<"isTargetCygMing()", CCDelegateTo<CC_X86_32_ThisCall_Mingw>>,
  CCDelegateTo<CC_X86_32_ThisCall_Win>
]>;
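// For illustration, in a thiscall member call such as `p->m(x)` the implicit
// `this` pointer is the first i32 argument and should be assigned to ECX, with
// `x` going to the stack via CC_X86_32_Common; Cygwin/MinGW targets take the
// _Mingw variant above, MSVC-style targets the _Win variant with its sret
// handling.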

def CC_X86_32_FastCC : CallingConv<[
  // Handles byval parameters.  Note that we can't rely on the delegation
  // to CC_X86_32_Common for this because that happens after code that
  // puts arguments in registers.
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,

  // The first 3 float or double arguments, if the call is not a vararg
  // call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,

  // Doubles get 8-byte slots that are 8-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 8>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
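// For illustration, a non-vararg call under CC_X86_32_FastCC such as
// f(i32 %a, i32 %b, double %c) on an SSE2-capable target should place %a in
// ECX, %b in EDX and %c in XMM0; any further doubles spill to 8-byte stack
// slots with 8-byte alignment.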

def CC_X86_Win32_CFGuard_Check : CallingConv<[
  // The CFGuard check call takes exactly one integer argument
  // (i.e. the target function address), which is passed in ECX.
  CCIfType<[i32], CCAssignToReg<[ECX]>>
]>;
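// For illustration, CC_X86_Win32_CFGuard_Check corresponds to the Windows
// Control Flow Guard check (e.g. the routine reached through
// __guard_check_icall_fptr), which receives the candidate call target in ECX
// and produces no return value.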

def CC_X86_32_GHC : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in STG registers: Base, Sp, Hp, R1
  CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
]>;

def CC_X86_32_HiPE : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2
  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX, ECX]>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>
]>;

// X86-64 Intel OpenCL built-ins calling convention.
def CC_Intel_OCL_BI : CallingConv<[

  CCIfType<[i32], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[ECX, EDX, R8D, R9D]>>>,
  CCIfType<[i64], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[RCX, RDX, R8,  R9 ]>>>,

  CCIfType<[i32], CCIfSubtarget<"is64Bit()", CCAssignToReg<[EDI, ESI, EDX, ECX]>>>,
  CCIfType<[i64], CCIfSubtarget<"is64Bit()", CCAssignToReg<[RDI, RSI, RDX, RCX]>>>,

  CCIfType<[i32], CCAssignToStack<4, 4>>,

  // The SSE vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,

  // The 256-bit vector arguments are passed in YMM registers.
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,

  // The 512-bit vector arguments are passed in ZMM registers.
  CCIfType<[v16f32, v8f64, v16i32, v8i64],
           CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,

  // Pass masks in mask registers
  CCIfType<[v16i1, v8i1], CCAssignToReg<[K1]>>,

  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
  CCIfSubtarget<"is64Bit()",       CCDelegateTo<CC_X86_64_C>>,
  CCDelegateTo<CC_X86_32_C>
]>;
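// For illustration, under CC_Intel_OCL_BI on a SysV-style 64-bit target the
// first four i64 arguments should land in RDI, RSI, RDX and RCX, vector
// arguments in XMM0-3/YMM0-3/ZMM0-3 according to their width, and anything
// left over is handled by the delegated base C convention.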

//===----------------------------------------------------------------------===//
// X86 Root Argument Calling Conventions
//===----------------------------------------------------------------------===//

// This is the root argument convention for the X86-32 backend.
def CC_X86_32 : CallingConv<[
  // The X86_INTR calling convention is valid on the MCU target and should
  // override the MCU calling convention, so it must be checked before
  // isTargetMCU().
  CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,
  CCIfSubtarget<"isTargetMCU()", CCDelegateTo<CC_X86_32_MCU>>,
  CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win32_VectorCall>>,
  CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
  CCIfCC<"CallingConv::CFGuard_Check", CCDelegateTo<CC_X86_Win32_CFGuard_Check>>,
  CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
  CCIfCC<"CallingConv::Tail", CCDelegateTo<CC_X86_32_FastCC>>,
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_32_HiPE>>,
  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_32_RegCall>>,

  // Otherwise, drop to normal X86-32 CC
  CCDelegateTo<CC_X86_32_C>
]>;

// This is the root argument convention for the X86-64 backend.
def CC_X86_64 : CallingConv<[
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
  CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<CC_X86_64_WebKit_JS>>,
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
  CCIfCC<"CallingConv::Win64", CCDelegateTo<CC_X86_Win64_C>>,
  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,
  CCIfCC<"CallingConv::X86_RegCall",
    CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_RegCall>>>,
  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_SysV64_RegCall>>,
  CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<CC_X86_64_C>
]>;

// This is the argument convention used for the entire X86 backend.
let Entry = 1 in
def CC_X86 : CallingConv<[
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<CC_Intel_OCL_BI>>,
  CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
  CCDelegateTo<CC_X86_32>
]>;
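// For illustration, `let Entry = 1` should make the TableGen CallingConv
// emitter give the generated CC_X86 function external linkage, so the C++
// lowering code can hand it to CCState (e.g. AnalyzeCallOperands) as a
// CCAssignFn, while the conventions it delegates to are emitted as static
// helpers.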

//===----------------------------------------------------------------------===//
// Callee-saved Registers.
//===----------------------------------------------------------------------===//

def CSR_NoRegs : CalleeSavedRegs<(add)>;

def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;
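// For reference, CSR_32 matches the i386 System V callee-saved set (ESI, EDI,
// EBX, EBP) and CSR_64 the x86-64 System V set (RBX, RBP, R12-R15); the stack
// pointer is not listed here because it is reserved and managed by frame
// lowering.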

def CSR_64_SwiftError : CalleeSavedRegs<(sub CSR_64, R12)>;
def CSR_64_SwiftTail : CalleeSavedRegs<(sub CSR_64, R13, R14)>;

def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;

def CSR_Win64_NoSSE : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15)>;

def CSR_Win64 : CalleeSavedRegs<(add CSR_Win64_NoSSE,
                                     (sequence "XMM%u", 6, 15))>;
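// For reference, CSR_Win64 matches the Microsoft x64 ABI, which additionally
// treats RDI, RSI and XMM6-XMM15 as callee-saved relative to the System V set.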

def CSR_Win64_SwiftError : CalleeSavedRegs<(sub CSR_Win64, R12)>;
def CSR_Win64_SwiftTail : CalleeSavedRegs<(sub CSR_Win64, R13, R14)>;

// The function used by Darwin to obtain the address of a thread-local variable
// uses rdi to pass a single parameter and rax for the return value. All other
// GPRs are preserved.
def CSR_64_TLS_Darwin : CalleeSavedRegs<(add CSR_64, RCX, RDX, RSI,
                                             R8, R9, R10, R11)>;

// CSRs that are handled by the prologue/epilogue.
def CSR_64_CXX_TLS_Darwin_PE : CalleeSavedRegs<(add RBP)>;

// CSRs that are handled explicitly via copies.
def CSR_64_CXX_TLS_Darwin_ViaCopy : CalleeSavedRegs<(sub CSR_64_TLS_Darwin, RBP)>;

// All GPRs except R11 and the return registers.
def CSR_64_RT_MostRegs : CalleeSavedRegs<(add CSR_64, RAX, RCX, RDX, RSI, RDI,
                                              R8, R9, R10)>;

// All registers except R11 and the return registers.
def CSR_64_RT_AllRegs     : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
                                                 (sequence "XMM%u", 0, 15))>;
def CSR_64_RT_AllRegs_AVX : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
                                                 (sequence "YMM%u", 0, 15))>;

def CSR_64_MostRegs : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
                                           R11, R12, R13, R14, R15, RBP,
                                           (sequence "XMM%u", 0, 15))>;

def CSR_32_AllRegs     : CalleeSavedRegs<(add EAX, EBX, ECX, EDX, EBP, ESI,
                                              EDI)>;
def CSR_32_AllRegs_SSE : CalleeSavedRegs<(add CSR_32_AllRegs,
                                              (sequence "XMM%u", 0, 7))>;
def CSR_32_AllRegs_AVX : CalleeSavedRegs<(add CSR_32_AllRegs,
                                              (sequence "YMM%u", 0, 7))>;
def CSR_32_AllRegs_AVX512 : CalleeSavedRegs<(add CSR_32_AllRegs,
                                                 (sequence "ZMM%u", 0, 7),
                                                 (sequence "K%u", 0, 7))>;

def CSR_64_AllRegs     : CalleeSavedRegs<(add CSR_64_MostRegs, RAX)>;
def CSR_64_AllRegs_NoSSE : CalleeSavedRegs<(add RAX, RBX, RCX, RDX, RSI, RDI, R8, R9,
                                                R10, R11, R12, R13, R14, R15, RBP)>;
def CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
                                                   (sequence "YMM%u", 0, 15)),
                                              (sequence "XMM%u", 0, 15))>;
def CSR_64_AllRegs_AVX512 : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
                                                      (sequence "ZMM%u", 0, 31),
                                                      (sequence "K%u", 0, 7)),
                                                 (sequence "XMM%u", 0, 15))>;

// Standard C + YMM6-15
def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
                                                  R13, R14, R15,
                                                  (sequence "YMM%u", 6, 15))>;

def CSR_Win64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI,
                                                     R12, R13, R14, R15,
                                                     (sequence "ZMM%u", 6, 21),
                                                     K4, K5, K6, K7)>;
// Standard C + XMM8-15
def CSR_64_Intel_OCL_BI       : CalleeSavedRegs<(add CSR_64,
                                                 (sequence "XMM%u", 8, 15))>;

// Standard C + YMM8-15
def CSR_64_Intel_OCL_BI_AVX    : CalleeSavedRegs<(add CSR_64,
                                                  (sequence "YMM%u", 8, 15))>;

def CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RSI, R14, R15,
                                                  (sequence "ZMM%u", 16, 31),
                                                  K4, K5, K6, K7)>;

// The register calling convention (RegCall) preserves a few GPRs and XMM
// registers: XMM4-7 on 32-bit targets, XMM8-15 on 64-bit targets.
def CSR_32_RegCall_NoSSE : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_32_RegCall       : CalleeSavedRegs<(add CSR_32_RegCall_NoSSE,
                                           (sequence "XMM%u", 4, 7))>;
def CSR_Win32_CFGuard_Check_NoSSE : CalleeSavedRegs<(add CSR_32_RegCall_NoSSE, ECX)>;
def CSR_Win32_CFGuard_Check       : CalleeSavedRegs<(add CSR_32_RegCall, ECX)>;
def CSR_Win64_RegCall_NoSSE : CalleeSavedRegs<(add RBX, RBP,
                                              (sequence "R%u", 10, 15))>;
def CSR_Win64_RegCall       : CalleeSavedRegs<(add CSR_Win64_RegCall_NoSSE,
                                              (sequence "XMM%u", 8, 15))>;
def CSR_SysV64_RegCall_NoSSE : CalleeSavedRegs<(add RBX, RBP,
                                               (sequence "R%u", 12, 15))>;
def CSR_SysV64_RegCall       : CalleeSavedRegs<(add CSR_SysV64_RegCall_NoSSE,
                                               (sequence "XMM%u", 8, 15))>;