xref: /freebsd/contrib/llvm-project/llvm/lib/Target/X86/X86CallingConv.td (revision 0b57cec536236d46e3dba9bd041533462f33dbb7)
1*0b57cec5SDimitry Andric//===-- X86CallingConv.td - Calling Conventions X86 32/64 --*- tablegen -*-===//
2*0b57cec5SDimitry Andric//
3*0b57cec5SDimitry Andric// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4*0b57cec5SDimitry Andric// See https://llvm.org/LICENSE.txt for license information.
5*0b57cec5SDimitry Andric// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6*0b57cec5SDimitry Andric//
7*0b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
8*0b57cec5SDimitry Andric//
9*0b57cec5SDimitry Andric// This describes the calling conventions for the X86-32 and X86-64
10*0b57cec5SDimitry Andric// architectures.
11*0b57cec5SDimitry Andric//
12*0b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
13*0b57cec5SDimitry Andric
14*0b57cec5SDimitry Andric/// CCIfSubtarget - Match if the current subtarget has a feature F.
/// F is the textual name of a boolean X86Subtarget member-function call,
/// e.g. "is64Bit()" or "hasSSE1()"; it is spliced into the generated C++ and
/// evaluated on the current MachineFunction's subtarget. Action A is applied
/// only when the predicate returns true.
15*0b57cec5SDimitry Andricclass CCIfSubtarget<string F, CCAction A>
16*0b57cec5SDimitry Andric    : CCIf<!strconcat("static_cast<const X86Subtarget&>"
17*0b57cec5SDimitry Andric                       "(State.getMachineFunction().getSubtarget()).", F),
18*0b57cec5SDimitry Andric           A>;
19*0b57cec5SDimitry Andric
20*0b57cec5SDimitry Andric/// CCIfNotSubtarget - Match if the current subtarget doesn't have a feature F.
/// Negated form of CCIfSubtarget: F is the same kind of X86Subtarget
/// predicate string, and action A is applied only when it returns false.
21*0b57cec5SDimitry Andricclass CCIfNotSubtarget<string F, CCAction A>
22*0b57cec5SDimitry Andric    : CCIf<!strconcat("!static_cast<const X86Subtarget&>"
23*0b57cec5SDimitry Andric                       "(State.getMachineFunction().getSubtarget()).", F),
24*0b57cec5SDimitry Andric           A>;
25*0b57cec5SDimitry Andric
26*0b57cec5SDimitry Andric// Register classes for RegCall
// Base record describing the register pools a regcall variant may use.
// Subclasses override the GPR/XMM/YMM/ZMM lists per target; only the x87
// pools have non-empty defaults here.
27*0b57cec5SDimitry Andricclass RC_X86_RegCall {
  // Integer register pools, one per value width.
28*0b57cec5SDimitry Andric  list<Register> GPR_8 = [];
29*0b57cec5SDimitry Andric  list<Register> GPR_16 = [];
30*0b57cec5SDimitry Andric  list<Register> GPR_32 = [];
31*0b57cec5SDimitry Andric  list<Register> GPR_64 = [];
  // x87 registers used for f80: one for argument passing, two for returns.
32*0b57cec5SDimitry Andric  list<Register> FP_CALL = [FP0];
33*0b57cec5SDimitry Andric  list<Register> FP_RET = [FP0, FP1];
  // SIMD register pools (128/256/512-bit).
34*0b57cec5SDimitry Andric  list<Register> XMM = [];
35*0b57cec5SDimitry Andric  list<Register> YMM = [];
36*0b57cec5SDimitry Andric  list<Register> ZMM = [];
37*0b57cec5SDimitry Andric}
38*0b57cec5SDimitry Andric
39*0b57cec5SDimitry Andric// RegCall register classes for 32 bits
// 32-bit mode: only the eight architectural SIMD registers are available,
// and the GPR pools exclude the stack/frame registers (ESP/EBP).
40*0b57cec5SDimitry Andricdef RC_X86_32_RegCall : RC_X86_RegCall {
41*0b57cec5SDimitry Andric  let GPR_8 = [AL, CL, DL, DIL, SIL];
42*0b57cec5SDimitry Andric  let GPR_16 = [AX, CX, DX, DI, SI];
43*0b57cec5SDimitry Andric  let GPR_32 = [EAX, ECX, EDX, EDI, ESI];
44*0b57cec5SDimitry Andric  let GPR_64 = [RAX]; ///< Not actually used, but AssignToReg can't handle []
45*0b57cec5SDimitry Andric                      ///< \todo Fix AssignToReg to enable empty lists
46*0b57cec5SDimitry Andric  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7];
47*0b57cec5SDimitry Andric  let YMM = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7];
48*0b57cec5SDimitry Andric  let ZMM = [ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7];
49*0b57cec5SDimitry Andric}
50*0b57cec5SDimitry Andric
// Common 64-bit SIMD pools (all sixteen XMM/YMM/ZMM registers); the GPR
// pools are left empty here and filled in by the Win64/SysV subclasses below.
51*0b57cec5SDimitry Andricclass RC_X86_64_RegCall : RC_X86_RegCall {
52*0b57cec5SDimitry Andric  let XMM = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
53*0b57cec5SDimitry Andric             XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15];
54*0b57cec5SDimitry Andric  let YMM = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
55*0b57cec5SDimitry Andric             YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15];
56*0b57cec5SDimitry Andric  let ZMM = [ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7,
57*0b57cec5SDimitry Andric             ZMM8, ZMM9, ZMM10, ZMM11, ZMM12, ZMM13, ZMM14, ZMM15];
58*0b57cec5SDimitry Andric}
59*0b57cec5SDimitry Andric
// Win64 regcall GPR pools. Note that RSP/RBP are excluded, and R13 is
// deliberately absent from every width's pool (unlike the SysV variant,
// which includes it).
60*0b57cec5SDimitry Andricdef RC_X86_64_RegCall_Win : RC_X86_64_RegCall {
61*0b57cec5SDimitry Andric  let GPR_8 = [AL, CL, DL, DIL, SIL, R8B, R9B, R10B, R11B, R12B, R14B, R15B];
62*0b57cec5SDimitry Andric  let GPR_16 = [AX, CX, DX, DI, SI, R8W, R9W, R10W, R11W, R12W, R14W, R15W];
63*0b57cec5SDimitry Andric  let GPR_32 = [EAX, ECX, EDX, EDI, ESI, R8D, R9D, R10D, R11D, R12D, R14D, R15D];
64*0b57cec5SDimitry Andric  let GPR_64 = [RAX, RCX, RDX, RDI, RSI, R8, R9, R10, R11, R12, R14, R15];
65*0b57cec5SDimitry Andric}
66*0b57cec5SDimitry Andric
// SysV regcall GPR pools. Note that RSP/RBP are excluded, and R10/R11 are
// deliberately absent from every width's pool (unlike the Win64 variant,
// which includes them).
67*0b57cec5SDimitry Andricdef RC_X86_64_RegCall_SysV : RC_X86_64_RegCall {
68*0b57cec5SDimitry Andric  let GPR_8 = [AL, CL, DL, DIL, SIL, R8B, R9B, R12B, R13B, R14B, R15B];
69*0b57cec5SDimitry Andric  let GPR_16 = [AX, CX, DX, DI, SI, R8W, R9W, R12W, R13W, R14W, R15W];
70*0b57cec5SDimitry Andric  let GPR_32 = [EAX, ECX, EDX, EDI, ESI, R8D, R9D, R12D, R13D, R14D, R15D];
71*0b57cec5SDimitry Andric  let GPR_64 = [RAX, RCX, RDX, RDI, RSI, R8, R9, R12, R13, R14, R15];
72*0b57cec5SDimitry Andric}
73*0b57cec5SDimitry Andric
74*0b57cec5SDimitry Andric// Intel regcall calling convention (shared base for the 32- and 64-bit variants).
// Shared regcall argument (CC_*) and return (RetCC_*) conventions,
// parameterized over the register pools in RC. Rules are tried in order;
// the first matching rule wins.
75*0b57cec5SDimitry Andricmulticlass X86_RegCall_base<RC_X86_RegCall RC> {
76*0b57cec5SDimitry Andricdef CC_#NAME : CallingConv<[
77*0b57cec5SDimitry Andric  // Handles byval parameters.
78*0b57cec5SDimitry Andric    CCIfSubtarget<"is64Bit()", CCIfByVal<CCPassByVal<8, 8>>>,
79*0b57cec5SDimitry Andric    CCIfByVal<CCPassByVal<4, 4>>,
80*0b57cec5SDimitry Andric
81*0b57cec5SDimitry Andric    // Promote i1/i8/i16/v1i1 arguments to i32.
82*0b57cec5SDimitry Andric    CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,
83*0b57cec5SDimitry Andric
84*0b57cec5SDimitry Andric    // Promote v8i1/v16i1/v32i1 arguments to i32.
85*0b57cec5SDimitry Andric    CCIfType<[v8i1, v16i1, v32i1], CCPromoteToType<i32>>,
86*0b57cec5SDimitry Andric
87*0b57cec5SDimitry Andric    // bool, char, int, enum, long, pointer --> GPR
88*0b57cec5SDimitry Andric    CCIfType<[i32], CCAssignToReg<RC.GPR_32>>,
89*0b57cec5SDimitry Andric
90*0b57cec5SDimitry Andric    // long long, __int64 --> GPR
91*0b57cec5SDimitry Andric    CCIfType<[i64], CCAssignToReg<RC.GPR_64>>,
92*0b57cec5SDimitry Andric
93*0b57cec5SDimitry Andric    // __mmask64 (v64i1) --> GPR64 (for x64) or 2 x GPR32 (for IA32)
94*0b57cec5SDimitry Andric    CCIfType<[v64i1], CCPromoteToType<i64>>,
95*0b57cec5SDimitry Andric    CCIfSubtarget<"is64Bit()", CCIfType<[i64],
96*0b57cec5SDimitry Andric      CCAssignToReg<RC.GPR_64>>>,
97*0b57cec5SDimitry Andric    CCIfSubtarget<"is32Bit()", CCIfType<[i64],
98*0b57cec5SDimitry Andric      CCCustom<"CC_X86_32_RegCall_Assign2Regs">>>,
99*0b57cec5SDimitry Andric
100*0b57cec5SDimitry Andric    // float, double, float128 --> XMM
101*0b57cec5SDimitry Andric    // In the case of SSE disabled --> save to stack
102*0b57cec5SDimitry Andric    CCIfType<[f32, f64, f128],
103*0b57cec5SDimitry Andric      CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,
104*0b57cec5SDimitry Andric
105*0b57cec5SDimitry Andric    // long double --> FP
106*0b57cec5SDimitry Andric    CCIfType<[f80], CCAssignToReg<RC.FP_CALL>>,
107*0b57cec5SDimitry Andric
108*0b57cec5SDimitry Andric    // __m128, __m128i, __m128d --> XMM
109*0b57cec5SDimitry Andric    // In the case of SSE disabled --> save to stack
110*0b57cec5SDimitry Andric    CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
111*0b57cec5SDimitry Andric      CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,
112*0b57cec5SDimitry Andric
113*0b57cec5SDimitry Andric    // __m256, __m256i, __m256d --> YMM
114*0b57cec5SDimitry Andric    // If AVX is not available --> save to stack
115*0b57cec5SDimitry Andric    CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
116*0b57cec5SDimitry Andric      CCIfSubtarget<"hasAVX()", CCAssignToReg<RC.YMM>>>,
117*0b57cec5SDimitry Andric
118*0b57cec5SDimitry Andric    // __m512, __m512i, __m512d --> ZMM
119*0b57cec5SDimitry Andric    // If AVX-512 is not available --> save to stack
120*0b57cec5SDimitry Andric    CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
121*0b57cec5SDimitry Andric      CCIfSubtarget<"hasAVX512()",CCAssignToReg<RC.ZMM>>>,
122*0b57cec5SDimitry Andric
123*0b57cec5SDimitry Andric    // If no register was found -> assign to stack
124*0b57cec5SDimitry Andric
125*0b57cec5SDimitry Andric    // In 64 bit, assign 64/32 bit values to 8 byte stack
126*0b57cec5SDimitry Andric    CCIfSubtarget<"is64Bit()", CCIfType<[i32, i64, f32, f64],
127*0b57cec5SDimitry Andric      CCAssignToStack<8, 8>>>,
128*0b57cec5SDimitry Andric
129*0b57cec5SDimitry Andric    // In 32 bit, assign 64/32 bit values to 8/4 byte stack
130*0b57cec5SDimitry Andric    CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
131*0b57cec5SDimitry Andric    CCIfType<[i64, f64], CCAssignToStack<8, 4>>,
132*0b57cec5SDimitry Andric
133*0b57cec5SDimitry Andric    // MMX type gets an 8 byte stack slot, while its alignment depends on target
134*0b57cec5SDimitry Andric    CCIfSubtarget<"is64Bit()", CCIfType<[x86mmx], CCAssignToStack<8, 8>>>,
135*0b57cec5SDimitry Andric    CCIfType<[x86mmx], CCAssignToStack<8, 4>>,
136*0b57cec5SDimitry Andric
137*0b57cec5SDimitry Andric    // f80 and f128 get stack slots whose size and alignment depend
138*0b57cec5SDimitry Andric    // on the subtarget (a 0 size/alignment defers to the type's natural ones).
139*0b57cec5SDimitry Andric    CCIfType<[f80, f128], CCAssignToStack<0, 0>>,
140*0b57cec5SDimitry Andric
141*0b57cec5SDimitry Andric    // Vectors get 16-byte stack slots that are 16-byte aligned.
142*0b57cec5SDimitry Andric    CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
143*0b57cec5SDimitry Andric      CCAssignToStack<16, 16>>,
144*0b57cec5SDimitry Andric
145*0b57cec5SDimitry Andric    // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
146*0b57cec5SDimitry Andric    CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
147*0b57cec5SDimitry Andric      CCAssignToStack<32, 32>>,
148*0b57cec5SDimitry Andric
149*0b57cec5SDimitry Andric    // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
150*0b57cec5SDimitry Andric    CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
151*0b57cec5SDimitry Andric      CCAssignToStack<64, 64>>
152*0b57cec5SDimitry Andric]>;
153*0b57cec5SDimitry Andric
154*0b57cec5SDimitry Andricdef RetCC_#NAME : CallingConv<[
155*0b57cec5SDimitry Andric    // Promote i1, v1i1, v8i1 arguments to i8.
156*0b57cec5SDimitry Andric    CCIfType<[i1, v1i1, v8i1], CCPromoteToType<i8>>,
157*0b57cec5SDimitry Andric
158*0b57cec5SDimitry Andric    // Promote v16i1 arguments to i16.
159*0b57cec5SDimitry Andric    CCIfType<[v16i1], CCPromoteToType<i16>>,
160*0b57cec5SDimitry Andric
161*0b57cec5SDimitry Andric    // Promote v32i1 arguments to i32.
162*0b57cec5SDimitry Andric    CCIfType<[v32i1], CCPromoteToType<i32>>,
163*0b57cec5SDimitry Andric
164*0b57cec5SDimitry Andric    // bool, char, int, enum, long, pointer --> GPR
165*0b57cec5SDimitry Andric    CCIfType<[i8], CCAssignToReg<RC.GPR_8>>,
166*0b57cec5SDimitry Andric    CCIfType<[i16], CCAssignToReg<RC.GPR_16>>,
167*0b57cec5SDimitry Andric    CCIfType<[i32], CCAssignToReg<RC.GPR_32>>,
168*0b57cec5SDimitry Andric
169*0b57cec5SDimitry Andric    // long long, __int64 --> GPR
170*0b57cec5SDimitry Andric    CCIfType<[i64], CCAssignToReg<RC.GPR_64>>,
171*0b57cec5SDimitry Andric
172*0b57cec5SDimitry Andric    // __mmask64 (v64i1) --> GPR64 (for x64) or 2 x GPR32 (for IA32)
173*0b57cec5SDimitry Andric    CCIfType<[v64i1], CCPromoteToType<i64>>,
174*0b57cec5SDimitry Andric    CCIfSubtarget<"is64Bit()", CCIfType<[i64],
175*0b57cec5SDimitry Andric      CCAssignToReg<RC.GPR_64>>>,
176*0b57cec5SDimitry Andric    CCIfSubtarget<"is32Bit()", CCIfType<[i64],
177*0b57cec5SDimitry Andric      CCCustom<"CC_X86_32_RegCall_Assign2Regs">>>,
178*0b57cec5SDimitry Andric
179*0b57cec5SDimitry Andric    // long double --> FP (returns may use both FP_RET registers)
180*0b57cec5SDimitry Andric    CCIfType<[f80], CCAssignToReg<RC.FP_RET>>,
181*0b57cec5SDimitry Andric
182*0b57cec5SDimitry Andric    // float, double, float128 --> XMM
183*0b57cec5SDimitry Andric    CCIfType<[f32, f64, f128],
184*0b57cec5SDimitry Andric      CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,
185*0b57cec5SDimitry Andric
186*0b57cec5SDimitry Andric    // __m128, __m128i, __m128d --> XMM
187*0b57cec5SDimitry Andric    CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
188*0b57cec5SDimitry Andric      CCIfSubtarget<"hasSSE1()", CCAssignToReg<RC.XMM>>>,
189*0b57cec5SDimitry Andric
190*0b57cec5SDimitry Andric    // __m256, __m256i, __m256d --> YMM
191*0b57cec5SDimitry Andric    CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
192*0b57cec5SDimitry Andric      CCIfSubtarget<"hasAVX()", CCAssignToReg<RC.YMM>>>,
193*0b57cec5SDimitry Andric
194*0b57cec5SDimitry Andric    // __m512, __m512i, __m512d --> ZMM
195*0b57cec5SDimitry Andric    CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
196*0b57cec5SDimitry Andric      CCIfSubtarget<"hasAVX512()", CCAssignToReg<RC.ZMM>>>
197*0b57cec5SDimitry Andric]>;
198*0b57cec5SDimitry Andric}
199*0b57cec5SDimitry Andric
200*0b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
201*0b57cec5SDimitry Andric// Return Value Calling Conventions
202*0b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
203*0b57cec5SDimitry Andric
204*0b57cec5SDimitry Andric// Return-value conventions common to all X86 CC's.
205*0b57cec5SDimitry Andricdef RetCC_X86Common : CallingConv<[
206*0b57cec5SDimitry Andric  // Scalar values are returned in AX first, then DX.  For i8, the ABI
207*0b57cec5SDimitry Andric  // requires the values to be in AL and AH, however this code uses AL and DL
208*0b57cec5SDimitry Andric  // instead. This is because using AH for the second register conflicts with
209*0b57cec5SDimitry Andric  // the way LLVM does multiple return values -- a return of {i16,i8} would end
210*0b57cec5SDimitry Andric  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
211*0b57cec5SDimitry Andric  // for functions that return two i8 values are currently expected to pack the
212*0b57cec5SDimitry Andric  // values into an i16 (which uses AX, and thus AL:AH).
213*0b57cec5SDimitry Andric  //
214*0b57cec5SDimitry Andric  // For code that doesn't care about the ABI, we allow returning more than two
215*0b57cec5SDimitry Andric  // integer values in registers.
216*0b57cec5SDimitry Andric  CCIfType<[v1i1],  CCPromoteToType<i8>>,
217*0b57cec5SDimitry Andric  CCIfType<[i1],  CCPromoteToType<i8>>,
218*0b57cec5SDimitry Andric  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
219*0b57cec5SDimitry Andric  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
220*0b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
221*0b57cec5SDimitry Andric  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,
222*0b57cec5SDimitry Andric
223*0b57cec5SDimitry Andric  // Boolean vectors of AVX-512 are returned in SIMD registers.
224*0b57cec5SDimitry Andric  // The call from AVX to AVX-512 function should work,
225*0b57cec5SDimitry Andric  // since the boolean types in AVX/AVX2 are promoted by default.
226*0b57cec5SDimitry Andric  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
227*0b57cec5SDimitry Andric  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
228*0b57cec5SDimitry Andric  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
229*0b57cec5SDimitry Andric  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
230*0b57cec5SDimitry Andric  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
231*0b57cec5SDimitry Andric  CCIfType<[v64i1], CCPromoteToType<v64i8>>,
232*0b57cec5SDimitry Andric
233*0b57cec5SDimitry Andric  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
234*0b57cec5SDimitry Andric  // can only be used by ABI non-compliant code. If the target doesn't have XMM
235*0b57cec5SDimitry Andric  // registers, it won't have vector types.
236*0b57cec5SDimitry Andric  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
237*0b57cec5SDimitry Andric            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
238*0b57cec5SDimitry Andric
239*0b57cec5SDimitry Andric  // 256-bit vectors are returned in YMM0 and YMM1, when they fit. YMM2 and YMM3
240*0b57cec5SDimitry Andric  // can only be used by ABI non-compliant code. This vector type is only
241*0b57cec5SDimitry Andric  // supported while using the AVX target feature.
242*0b57cec5SDimitry Andric  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
243*0b57cec5SDimitry Andric            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
244*0b57cec5SDimitry Andric
245*0b57cec5SDimitry Andric  // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
246*0b57cec5SDimitry Andric  // can only be used by ABI non-compliant code. This vector type is only
247*0b57cec5SDimitry Andric  // supported while using the AVX-512 target feature.
248*0b57cec5SDimitry Andric  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
249*0b57cec5SDimitry Andric            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
250*0b57cec5SDimitry Andric
251*0b57cec5SDimitry Andric  // MMX vector types are always returned in MM0. If the target doesn't have
252*0b57cec5SDimitry Andric  // MM0, it doesn't support these vector types.
253*0b57cec5SDimitry Andric  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,
254*0b57cec5SDimitry Andric
255*0b57cec5SDimitry Andric  // Long double types are always returned in FP0 (even with SSE),
256*0b57cec5SDimitry Andric  // except on Win64.
257*0b57cec5SDimitry Andric  CCIfNotSubtarget<"isTargetWin64()", CCIfType<[f80], CCAssignToReg<[FP0, FP1]>>>
258*0b57cec5SDimitry Andric]>;
259*0b57cec5SDimitry Andric
260*0b57cec5SDimitry Andric// X86-32 C return-value convention.
261*0b57cec5SDimitry Andricdef RetCC_X86_32_C : CallingConv<[
262*0b57cec5SDimitry Andric  // The X86-32 calling convention returns FP values in FP0, unless marked
263*0b57cec5SDimitry Andric  // with "inreg" (used here to distinguish one kind of reg from another,
264*0b57cec5SDimitry Andric  // weirdly; this is really the sse-regparm calling convention) in which
265*0b57cec5SDimitry Andric  // case they use XMM0, otherwise it is the same as the common X86 calling
266*0b57cec5SDimitry Andric  // conv.
267*0b57cec5SDimitry Andric  CCIfInReg<CCIfSubtarget<"hasSSE2()",
268*0b57cec5SDimitry Andric    CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
  // Non-inreg (or no-SSE2) f32/f64 returns use the x87 stack registers.
269*0b57cec5SDimitry Andric  CCIfType<[f32,f64], CCAssignToReg<[FP0, FP1]>>,
270*0b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86Common>
271*0b57cec5SDimitry Andric]>;
272*0b57cec5SDimitry Andric
273*0b57cec5SDimitry Andric// X86-32 FastCC return-value convention.
274*0b57cec5SDimitry Andricdef RetCC_X86_32_Fast : CallingConv<[
275*0b57cec5SDimitry Andric  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
276*0b57cec5SDimitry Andric  // SSE2.
277*0b57cec5SDimitry Andric  // This can happen when a float, 2 x float, or 3 x float vector is split by
278*0b57cec5SDimitry Andric  // target lowering, and is returned in 1-3 sse regs.
279*0b57cec5SDimitry Andric  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
280*0b57cec5SDimitry Andric  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
281*0b57cec5SDimitry Andric
282*0b57cec5SDimitry Andric  // For integers, ECX can be used as an extra return register
283*0b57cec5SDimitry Andric  CCIfType<[i8],  CCAssignToReg<[AL, DL, CL]>>,
284*0b57cec5SDimitry Andric  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
285*0b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
286*0b57cec5SDimitry Andric
287*0b57cec5SDimitry Andric  // Otherwise, it is the same as the common X86 calling convention.
288*0b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86Common>
289*0b57cec5SDimitry Andric]>;
290*0b57cec5SDimitry Andric
291*0b57cec5SDimitry Andric// Intel_OCL_BI return-value convention.
292*0b57cec5SDimitry Andricdef RetCC_Intel_OCL_BI : CallingConv<[
293*0b57cec5SDimitry Andric  // Vector types are returned in XMM0, XMM1, XMM2 and XMM3.
294*0b57cec5SDimitry Andric  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
295*0b57cec5SDimitry Andric            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
296*0b57cec5SDimitry Andric
297*0b57cec5SDimitry Andric  // 256-bit vectors (FP and integer)
298*0b57cec5SDimitry Andric  // No more than 4 registers
299*0b57cec5SDimitry Andric  CCIfType<[v8f32, v4f64, v8i32, v4i64],
300*0b57cec5SDimitry Andric            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
301*0b57cec5SDimitry Andric
302*0b57cec5SDimitry Andric  // 512-bit vectors (FP and integer)
303*0b57cec5SDimitry Andric  CCIfType<[v16f32, v8f64, v16i32, v8i64],
304*0b57cec5SDimitry Andric            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
305*0b57cec5SDimitry Andric
306*0b57cec5SDimitry Andric  // i32, i64 in the standard way
307*0b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86Common>
308*0b57cec5SDimitry Andric]>;
309*0b57cec5SDimitry Andric
310*0b57cec5SDimitry Andric// X86-32 HiPE return-value convention.
311*0b57cec5SDimitry Andricdef RetCC_X86_32_HiPE : CallingConv<[
312*0b57cec5SDimitry Andric  // Promote small integers (i8/i16) to i32
313*0b57cec5SDimitry Andric  CCIfType<[i8, i16], CCPromoteToType<i32>>,
314*0b57cec5SDimitry Andric
315*0b57cec5SDimitry Andric  // Return: HP, P, VAL1, VAL2 (HiPE VM registers mapped to ESI, EBP, EAX, EDX)
316*0b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX]>>
317*0b57cec5SDimitry Andric]>;
318*0b57cec5SDimitry Andric
319*0b57cec5SDimitry Andric// X86-32 Vectorcall return-value convention.
320*0b57cec5SDimitry Andricdef RetCC_X86_32_VectorCall : CallingConv<[
321*0b57cec5SDimitry Andric  // Floating Point types are returned in XMM0, XMM1, XMM2 and XMM3.
322*0b57cec5SDimitry Andric  CCIfType<[f32, f64, f128],
323*0b57cec5SDimitry Andric            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
324*0b57cec5SDimitry Andric
325*0b57cec5SDimitry Andric  // Return integers in the standard way.
326*0b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86Common>
327*0b57cec5SDimitry Andric]>;
328*0b57cec5SDimitry Andric
329*0b57cec5SDimitry Andric// X86-64 C return-value convention.
330*0b57cec5SDimitry Andricdef RetCC_X86_64_C : CallingConv<[
331*0b57cec5SDimitry Andric  // The X86-64 calling convention always returns FP values in XMM0/XMM1.
332*0b57cec5SDimitry Andric  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
333*0b57cec5SDimitry Andric  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,
334*0b57cec5SDimitry Andric  CCIfType<[f128], CCAssignToReg<[XMM0, XMM1]>>,
335*0b57cec5SDimitry Andric
336*0b57cec5SDimitry Andric  // MMX vector types are always returned in XMM0/XMM1.
337*0b57cec5SDimitry Andric  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
338*0b57cec5SDimitry Andric
  // A swifterror value is returned in R12 (matches CC_X86_64_C below).
339*0b57cec5SDimitry Andric  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,
340*0b57cec5SDimitry Andric
341*0b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86Common>
342*0b57cec5SDimitry Andric]>;
343*0b57cec5SDimitry Andric
344*0b57cec5SDimitry Andric// X86-Win64 C return-value convention.
345*0b57cec5SDimitry Andricdef RetCC_X86_Win64_C : CallingConv<[
346*0b57cec5SDimitry Andric  // The X86-Win64 calling convention always returns __m64 values in RAX
347*0b57cec5SDimitry Andric  // (bitconverted to i64, then assigned by the delegated convention).
348*0b57cec5SDimitry Andric  CCIfType<[x86mmx], CCBitConvertToType<i64>>,
349*0b57cec5SDimitry Andric
350*0b57cec5SDimitry Andric  // Otherwise, everything is the same as 'normal' X86-64 C CC.
351*0b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86_64_C>
352*0b57cec5SDimitry Andric]>;
352*0b57cec5SDimitry Andric
353*0b57cec5SDimitry Andric// X86-64 vectorcall return-value convention.
354*0b57cec5SDimitry Andricdef RetCC_X86_64_Vectorcall : CallingConv<[
355*0b57cec5SDimitry Andric  // Vectorcall calling convention always returns FP values in XMMs.
356*0b57cec5SDimitry Andric  CCIfType<[f32, f64, f128],
357*0b57cec5SDimitry Andric    CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
358*0b57cec5SDimitry Andric
359*0b57cec5SDimitry Andric  // Otherwise, everything is the same as Windows X86-64 C CC.
360*0b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86_Win64_C>
361*0b57cec5SDimitry Andric]>;
362*0b57cec5SDimitry Andric
363*0b57cec5SDimitry Andric// X86-64 HiPE return-value convention.
364*0b57cec5SDimitry Andricdef RetCC_X86_64_HiPE : CallingConv<[
365*0b57cec5SDimitry Andric  // Promote all narrower integer types to i64
366*0b57cec5SDimitry Andric  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
367*0b57cec5SDimitry Andric
368*0b57cec5SDimitry Andric  // Return: HP, P, VAL1, VAL2 (HiPE VM registers mapped to R15, RBP, RAX, RDX)
369*0b57cec5SDimitry Andric  CCIfType<[i64], CCAssignToReg<[R15, RBP, RAX, RDX]>>
370*0b57cec5SDimitry Andric]>;
371*0b57cec5SDimitry Andric
372*0b57cec5SDimitry Andric// X86-64 WebKit_JS return-value convention.
373*0b57cec5SDimitry Andricdef RetCC_X86_64_WebKit_JS : CallingConv<[
374*0b57cec5SDimitry Andric  // Promote all narrower integer types to i64
375*0b57cec5SDimitry Andric  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
376*0b57cec5SDimitry Andric
377*0b57cec5SDimitry Andric  // Return: RAX (the only return register for this convention)
378*0b57cec5SDimitry Andric  CCIfType<[i64], CCAssignToReg<[RAX]>>
379*0b57cec5SDimitry Andric]>;
380*0b57cec5SDimitry Andric
381*0b57cec5SDimitry Andricdef RetCC_X86_64_Swift : CallingConv<[
382*0b57cec5SDimitry Andric
  // A swifterror value is returned in R12 (matching RetCC_X86_64_C).
383*0b57cec5SDimitry Andric  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,
384*0b57cec5SDimitry Andric
385*0b57cec5SDimitry Andric  // For integers, ECX, R8D can be used as extra return registers.
386*0b57cec5SDimitry Andric  CCIfType<[v1i1],  CCPromoteToType<i8>>,
387*0b57cec5SDimitry Andric  CCIfType<[i1],  CCPromoteToType<i8>>,
388*0b57cec5SDimitry Andric  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL, R8B]>>,
389*0b57cec5SDimitry Andric  CCIfType<[i16], CCAssignToReg<[AX, DX, CX, R8W]>>,
390*0b57cec5SDimitry Andric  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX, R8D]>>,
391*0b57cec5SDimitry Andric  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX, R8]>>,
392*0b57cec5SDimitry Andric
393*0b57cec5SDimitry Andric  // XMM0, XMM1, XMM2 and XMM3 can be used to return FP values.
394*0b57cec5SDimitry Andric  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
395*0b57cec5SDimitry Andric  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
396*0b57cec5SDimitry Andric  CCIfType<[f128], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
397*0b57cec5SDimitry Andric
398*0b57cec5SDimitry Andric  // MMX vector types are returned in XMM0, XMM1, XMM2 and XMM3.
399*0b57cec5SDimitry Andric  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,
400*0b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86Common>
401*0b57cec5SDimitry Andric]>;
402*0b57cec5SDimitry Andric
403*0b57cec5SDimitry Andric// X86-64 AnyReg return-value convention. No explicit register is specified for
404*0b57cec5SDimitry Andric// the return-value. The register allocator is allowed and expected to choose
405*0b57cec5SDimitry Andric// any free register.
406*0b57cec5SDimitry Andric//
407*0b57cec5SDimitry Andric// This calling convention is currently only supported by the stackmap and
408*0b57cec5SDimitry Andric// patchpoint intrinsics. All other uses will result in an assert on Debug
409*0b57cec5SDimitry Andric// builds. On Release builds we fallback to the X86 C calling convention.
410*0b57cec5SDimitry Andricdef RetCC_X86_64_AnyReg : CallingConv<[
  // CC_X86_AnyReg_Error asserts/falls back as described in the header above.
411*0b57cec5SDimitry Andric  CCCustom<"CC_X86_AnyReg_Error">
412*0b57cec5SDimitry Andric]>;
413*0b57cec5SDimitry Andric
414*0b57cec5SDimitry Andric// X86-64 HHVM return-value convention.
415*0b57cec5SDimitry Andricdef RetCC_X86_64_HHVM: CallingConv<[
416*0b57cec5SDimitry Andric  // Promote all narrower integer types to i64
417*0b57cec5SDimitry Andric  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
418*0b57cec5SDimitry Andric
419*0b57cec5SDimitry Andric  // Return: may use any GP register except RSP and R12.
420*0b57cec5SDimitry Andric  CCIfType<[i64], CCAssignToReg<[RBX, RBP, RDI, RSI, RDX, RCX, R8, R9,
421*0b57cec5SDimitry Andric                                 RAX, R10, R11, R13, R14, R15]>>
422*0b57cec5SDimitry Andric]>;
423*0b57cec5SDimitry Andric
424*0b57cec5SDimitry Andric
// Instantiate CC_* and RetCC_* regcall conventions for the 32-bit, Win64 and
// SysV64 register pools defined above.
425*0b57cec5SDimitry Andricdefm X86_32_RegCall :
426*0b57cec5SDimitry Andric	 X86_RegCall_base<RC_X86_32_RegCall>;
427*0b57cec5SDimitry Andricdefm X86_Win64_RegCall :
428*0b57cec5SDimitry Andric     X86_RegCall_base<RC_X86_64_RegCall_Win>;
429*0b57cec5SDimitry Andricdefm X86_SysV64_RegCall :
430*0b57cec5SDimitry Andric     X86_RegCall_base<RC_X86_64_RegCall_SysV>;
431*0b57cec5SDimitry Andric
432*0b57cec5SDimitry Andric// This is the root return-value convention for the X86-32 backend.
433*0b57cec5SDimitry Andricdef RetCC_X86_32 : CallingConv<[
434*0b57cec5SDimitry Andric  // If FastCC, use RetCC_X86_32_Fast.
435*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::Fast", CCDelegateTo<RetCC_X86_32_Fast>>,
436*0b57cec5SDimitry Andric  // If HiPE, use RetCC_X86_32_HiPE.
437*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_32_HiPE>>,
  // Vectorcall and regcall each get their dedicated return convention.
438*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_32_VectorCall>>,
439*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<RetCC_X86_32_RegCall>>,
440*0b57cec5SDimitry Andric
441*0b57cec5SDimitry Andric  // Otherwise, use RetCC_X86_32_C.
442*0b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86_32_C>
443*0b57cec5SDimitry Andric]>;
444*0b57cec5SDimitry Andric
445*0b57cec5SDimitry Andric// This is the root return-value convention for the X86-64 backend.
446*0b57cec5SDimitry Andricdef RetCC_X86_64 : CallingConv<[
447*0b57cec5SDimitry Andric  // HiPE uses RetCC_X86_64_HiPE
448*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,
449*0b57cec5SDimitry Andric
450*0b57cec5SDimitry Andric  // Handle JavaScript calls.
451*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<RetCC_X86_64_WebKit_JS>>,
452*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,
453*0b57cec5SDimitry Andric
454*0b57cec5SDimitry Andric  // Handle Swift calls.
455*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::Swift", CCDelegateTo<RetCC_X86_64_Swift>>,
456*0b57cec5SDimitry Andric
457*0b57cec5SDimitry Andric  // Handle explicit CC selection
458*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
459*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,
460*0b57cec5SDimitry Andric
461*0b57cec5SDimitry Andric  // Handle Vectorcall CC
462*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_64_Vectorcall>>,
463*0b57cec5SDimitry Andric
464*0b57cec5SDimitry Andric  // Handle HHVM calls.
465*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::HHVM", CCDelegateTo<RetCC_X86_64_HHVM>>,
466*0b57cec5SDimitry Andric
  // Regcall: Win64 targets use the Win64 pools, everything else uses SysV.
467*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_RegCall",
468*0b57cec5SDimitry Andric          CCIfSubtarget<"isTargetWin64()",
469*0b57cec5SDimitry Andric                        CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
470*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<RetCC_X86_SysV64_RegCall>>,
471*0b57cec5SDimitry Andric
472*0b57cec5SDimitry Andric  // Mingw64 and native Win64 use Win64 CC
473*0b57cec5SDimitry Andric  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,
474*0b57cec5SDimitry Andric
475*0b57cec5SDimitry Andric  // Otherwise, drop to normal X86-64 CC
476*0b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86_64_C>
477*0b57cec5SDimitry Andric]>;
478*0b57cec5SDimitry Andric
479*0b57cec5SDimitry Andric// This is the return-value convention used for the entire X86 backend.
// Entry = 1 marks this as a top-level convention; the TableGen backend emits
// it with external linkage so the rest of the X86 backend can reference it.
480*0b57cec5SDimitry Andriclet Entry = 1 in
481*0b57cec5SDimitry Andricdef RetCC_X86 : CallingConv<[
482*0b57cec5SDimitry Andric
483*0b57cec5SDimitry Andric  // Check if this is the Intel OpenCL built-ins calling convention
484*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<RetCC_Intel_OCL_BI>>,
485*0b57cec5SDimitry Andric
  // Dispatch on bitness: 64-bit targets, then everything else.
486*0b57cec5SDimitry Andric  CCIfSubtarget<"is64Bit()", CCDelegateTo<RetCC_X86_64>>,
487*0b57cec5SDimitry Andric  CCDelegateTo<RetCC_X86_32>
488*0b57cec5SDimitry Andric]>;
489*0b57cec5SDimitry Andric
490*0b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
491*0b57cec5SDimitry Andric// X86-64 Argument Calling Conventions
492*0b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
493*0b57cec5SDimitry Andric
// CC_X86_64_C - The standard X86-64 C argument convention (used for the
// non-Windows 64-bit targets).  Entries are matched in order, so the plain
// register assignments only apply once the earlier special cases (byval,
// nest, Swift parameters, ...) have been ruled out.
def CC_X86_64_C : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10 (R10D when the target
  // uses the 64-bit ILP32 "x32" ABI, where pointers are 32 bits wide).
  CCIfNest<CCIfSubtarget<"isTarget64BitILP32()", CCAssignToReg<[R10D]>>>,
  CCIfNest<CCAssignToReg<[R10]>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[R13]>>>,

  // A SwiftError is passed in R12.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

  // For Swift Calling Convention, pass sret in %rax.
  CCIfCC<"CallingConv::Swift",
    CCIfSRet<CCIfType<[i64], CCAssignToReg<[RAX]>>>>,

  // The first 6 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
  CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,

  // The first 8 MMX vector arguments are passed in XMM registers on Darwin:
  // promoting to v2i64 makes them hit the XMM assignment rule below.
  CCIfType<[x86mmx],
            CCIfSubtarget<"isTargetDarwin()",
            CCIfSubtarget<"hasSSE2()",
            CCPromoteToType<v2i64>>>>,

  // Boolean vectors of AVX-512 are passed in SIMD registers.
  // The call from AVX to AVX-512 function should work,
  // since the boolean types in AVX/AVX2 are promoted by default.
  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
  CCIfType<[v64i1], CCPromoteToType<v64i8>>,

  // The first 8 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, f128, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,

  // The first 8 256-bit vector arguments are passed in YMM registers, unless
  // this is a vararg function.
  // FIXME: This isn't precisely correct; the x86-64 ABI document says that
  // fixed arguments to vararg functions are supposed to be passed in
  // registers.  Actually modeling that would be a lot of work, though.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                          CCIfSubtarget<"hasAVX()",
                          CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
                                         YMM4, YMM5, YMM6, YMM7]>>>>,

  // The first 8 512-bit vector arguments are passed in ZMM registers.
  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
            CCIfSubtarget<"hasAVX512()",
            CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depends on the
  // subtarget (a size/alignment of 0 tells the framework to use the type's
  // natural size and alignment on this target).
  CCIfType<[f80, f128], CCAssignToStack<0, 0>>,

  // Vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
           CCAssignToStack<64, 64>>
]>;
574*0b57cec5SDimitry Andric
// Calling convention for X86-64 HHVM (the HipHop Virtual Machine).
def CC_X86_64_HHVM : CallingConv<[
  // Use all/any GP registers for args, except RSP (the stack pointer).
  // Note there is no stack fallback: all arguments must fit in these
  // registers.
  CCIfType<[i64], CCAssignToReg<[RBX, R12, RBP, R15,
                                 RDI, RSI, RDX, RCX, R8, R9,
                                 RAX, R10, R11, R13, R14]>>
]>;
582*0b57cec5SDimitry Andric
// Calling convention for helper functions in HHVM.
def CC_X86_64_HHVM_C : CallingConv<[
  // Pass the first argument in RBP (HHVM's VM frame-pointer register).
  CCIfType<[i64], CCAssignToReg<[RBP]>>,

  // Otherwise it's the same as the regular C calling convention.
  CCDelegateTo<CC_X86_64_C>
]>;
591*0b57cec5SDimitry Andric
// Calling convention used on Win64 (the Microsoft x64 ABI).  Unlike the
// SysV convention above, each of the four argument positions shadows both
// a GPR and an XMM register, and anything wider than 64 bits is passed
// indirectly by pointer.
def CC_X86_Win64_C : CallingConv<[
  // FIXME: Handle varargs.

  // Byval aggregates are passed by pointer
  CCIfByVal<CCPassIndirect<i64>>,

  // Promote i1/v1i1 arguments to i8.
  CCIfType<[i1, v1i1], CCPromoteToType<i8>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // A SwiftError is passed in R12.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,

  // 128 bit vectors are passed by pointer
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,


  // 256 bit vectors are passed by pointer
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>,

  // 512 bit vectors are passed by pointer
  CCIfType<[v64i8, v32i16, v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,

  // Long doubles are passed by pointer
  CCIfType<[f80], CCPassIndirect<i64>>,

  // MMX vectors are bitconverted to i64 so that they are covered by the
  // integer-register assignments below (i.e. the first 4 go in GPRs).
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // The first 4 integer arguments are passed in integer registers, each
  // shadowing the XMM register of the same argument position.
  CCIfType<[i8 ], CCAssignToRegWithShadow<[CL  , DL  , R8B , R9B ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,
  CCIfType<[i16], CCAssignToRegWithShadow<[CX  , DX  , R8W , R9W ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // Do not pass the sret argument in RCX, the Win64 thiscall calling
  // convention requires "this" to be passed in RCX.
  CCIfCC<"CallingConv::X86_ThisCall",
    CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[RDX , R8  , R9  ],
                                                     [XMM1, XMM2, XMM3]>>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[RCX , RDX , R8  , R9  ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // The first 4 FP/Vector arguments are passed in XMM registers, each
  // shadowing the GPR of the same argument position.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToRegWithShadow<[XMM0, XMM1, XMM2, XMM3],
                                   [RCX , RDX , R8  , R9  ]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i8, i16, i32, i64, f32, f64], CCAssignToStack<8, 8>>
]>;
650*0b57cec5SDimitry Andric
// Win64 vectorcall: the custom handler places FP/vector arguments, then
// everything else follows the regular Win64 C rules.
def CC_X86_Win64_VectorCall : CallingConv<[
  CCCustom<"CC_X86_64_VectorCall">,

  // Delegate to the Win64 C convention to handle integer types.
  CCDelegateTo<CC_X86_Win64_C>
]>;
657*0b57cec5SDimitry Andric
658*0b57cec5SDimitry Andric
// Calling convention for GHC (the Glasgow Haskell Compiler) on X86-64.
// Arguments map onto GHC's virtual STG machine registers.
def CC_X86_64_GHC : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64],
            CCAssignToReg<[R13, RBP, R12, RBX, R14, RSI, RDI, R8, R9, R15]>>,

  // Pass in STG registers: F1, F2, F3, F4, D1, D2
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>,
  // AVX
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
            CCIfSubtarget<"hasAVX()",
            CCAssignToReg<[YMM1, YMM2, YMM3, YMM4, YMM5, YMM6]>>>,
  // AVX-512
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
            CCIfSubtarget<"hasAVX512()",
            CCAssignToReg<[ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6]>>>
]>;
680*0b57cec5SDimitry Andric
// Calling convention for HiPE (High-Performance Erlang) on X86-64.
def CC_X86_64_HiPE : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2, ARG3
  CCIfType<[i64], CCAssignToReg<[R15, RBP, RSI, RDX, RCX, R8]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
]>;
692*0b57cec5SDimitry Andric
// Calling convention for WebKit JavaScript calls on X86-64: a single
// register argument, everything else on the stack.
def CC_X86_64_WebKit_JS : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Only the first integer argument is passed in register.
  CCIfType<[i32], CCAssignToReg<[EAX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX]>>,

  // The remaining integer arguments are passed on the stack. 32bit integer and
  // floating-point arguments are aligned to 4 byte and stored in 4 byte slots.
  // 64bit integer and floating-point arguments are aligned to 8 byte and stored
  // in 8 byte stack slots.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;
708*0b57cec5SDimitry Andric
// No explicit register is specified for the AnyReg calling convention. The
// register allocator may assign the arguments to any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the X86 C calling convention.
def CC_X86_64_AnyReg : CallingConv<[
  // The custom handler reports the misuse; see the comment above.
  CCCustom<"CC_X86_AnyReg_Error">
]>;
718*0b57cec5SDimitry Andric
719*0b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
720*0b57cec5SDimitry Andric// X86 C Calling Convention
721*0b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
722*0b57cec5SDimitry Andric
/// CC_X86_32_Vector_Common - In all X86-32 calling conventions, extra vector
/// values (those that did not get a register from the delegating convention)
/// are spilled on the stack.
def CC_X86_32_Vector_Common : CallingConv<[
  // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // 512-bit AVX-512 vectors get 64-byte stack slots that are 64-byte aligned.
  CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
           CCAssignToStack<64, 64>>
]>;
737*0b57cec5SDimitry Andric
// CC_X86_32_Vector_Standard - The first 3 vector arguments are passed in
// vector registers
def CC_X86_32_Vector_Standard : CallingConv<[
  // SSE vector arguments are passed in XMM registers.
  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                CCAssignToReg<[XMM0, XMM1, XMM2]>>>,

  // AVX 256-bit vector arguments are passed in YMM registers.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                CCIfSubtarget<"hasAVX()",
                CCAssignToReg<[YMM0, YMM1, YMM2]>>>>,

  // AVX 512-bit vector arguments are passed in ZMM registers.
  // NOTE(review): unlike the YMM case there is no explicit hasAVX512()
  // guard here — presumably 512-bit vector types only reach this point
  // when AVX-512 is available; confirm before relying on it.
  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
                CCAssignToReg<[ZMM0, ZMM1, ZMM2]>>>,

  // Anything that did not get a register spills to the stack.
  CCDelegateTo<CC_X86_32_Vector_Common>
]>;
756*0b57cec5SDimitry Andric
// CC_X86_32_Vector_Darwin - The first 4 vector arguments are passed in
// vector registers (one more than the standard convention allows).
def CC_X86_32_Vector_Darwin : CallingConv<[
  // SSE vector arguments are passed in XMM registers.
  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,

  // AVX 256-bit vector arguments are passed in YMM registers.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                CCIfSubtarget<"hasAVX()",
                CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,

  // AVX 512-bit vector arguments are passed in ZMM registers.
  CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
                CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>>,

  // Anything that did not get a register spills to the stack.
  CCDelegateTo<CC_X86_32_Vector_Common>
]>;
775*0b57cec5SDimitry Andric
/// CC_X86_32_Common - In all X86-32 calling conventions, extra integers and FP
/// values are spilled on the stack.  The specific conventions (C, fastcall,
/// thiscall, ...) assign their registers first and then delegate here.
def CC_X86_32_Common : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<4, 4>>,

  // The first 3 float or double arguments, if marked 'inreg' and if the call
  // is not a vararg call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,

  // The first 3 __m64 vector arguments are passed in mmx registers if the
  // call is not a vararg call.
  CCIfNotVarArg<CCIfType<[x86mmx],
                CCAssignToReg<[MM0, MM1, MM2]>>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Doubles get 8-byte slots that are 4-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 4>>,

  // Long doubles get slots whose size depends on the subtarget (size 0
  // means "use the type's natural size on this target").
  CCIfType<[f80], CCAssignToStack<0, 4>>,

  // Boolean vectors of AVX-512 are passed in SIMD registers.
  // The call from AVX to AVX-512 function should work,
  // since the boolean types in AVX/AVX2 are promoted by default.
  CCIfType<[v2i1],  CCPromoteToType<v2i64>>,
  CCIfType<[v4i1],  CCPromoteToType<v4i32>>,
  CCIfType<[v8i1],  CCPromoteToType<v8i16>>,
  CCIfType<[v16i1], CCPromoteToType<v16i8>>,
  CCIfType<[v32i1], CCPromoteToType<v32i8>>,
  CCIfType<[v64i1], CCPromoteToType<v64i8>>,

  // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
  // passed in the parameter area.
  CCIfType<[x86mmx], CCAssignToStack<8, 4>>,

  // Darwin passes vectors in a form that differs from the i386 psABI
  CCIfSubtarget<"isTargetDarwin()", CCDelegateTo<CC_X86_32_Vector_Darwin>>,

  // Otherwise, use the standard (non-Darwin) X86-32 vector convention.
  CCDelegateTo<CC_X86_32_Vector_Standard>
]>;
823*0b57cec5SDimitry Andric
// CC_X86_32_C - The standard X86-32 C (cdecl-style) argument convention.
def CC_X86_32_C : CallingConv<[
  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in ECX.
  CCIfNest<CCAssignToReg<[ECX]>>,

  // The first 3 integer arguments, if marked 'inreg' and if the call is not
  // a vararg call, are passed in integer registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
838*0b57cec5SDimitry Andric
// CC_X86_32_MCU - Argument convention for the Intel MCU (iamcu) target.
def CC_X86_32_MCU : CallingConv<[
  // Handles byval parameters.  Note that, like FastCC, we can't rely on
  // the delegation to CC_X86_32_Common because that happens after code that
  // puts arguments in registers.
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  // If the call is not a vararg call, some arguments may be passed
  // in integer registers (the custom C++ handler decides which).
  CCIfNotVarArg<CCIfType<[i32], CCCustom<"CC_X86_32_MCUInReg">>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
855*0b57cec5SDimitry Andric
// CC_X86_32_FastCall - The Microsoft fastcall convention on X86-32.
def CC_X86_32_FastCall : CallingConv<[
  // Promote i1 to i8.
  CCIfType<[i1], CCPromoteToType<i8>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  // (using the sub-register of the appropriate width).
  CCIfInReg<CCIfType<[ i8], CCAssignToReg<[ CL,  DL]>>>,
  CCIfInReg<CCIfType<[i16], CCAssignToReg<[ CX,  DX]>>>,
  CCIfInReg<CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
871*0b57cec5SDimitry Andric
// Win32 vectorcall: floating-point/vector arguments go through the custom
// handler, then the remaining arguments follow fastcall rules.
def CC_X86_Win32_VectorCall : CallingConv<[
  // Pass floating point in XMMs
  CCCustom<"CC_X86_32_VectorCall">,

  // Delegate to fastcall to handle integer types.
  CCDelegateTo<CC_X86_32_FastCall>
]>;
879*0b57cec5SDimitry Andric
// Shared tail of the thiscall conventions (Mingw and Win variants below
// delegate here after their own preprocessing).
def CC_X86_32_ThisCall_Common : CallingConv<[
  // The first integer argument is passed in ECX ("this" for C++ methods).
  CCIfType<[i32], CCAssignToReg<[ECX]>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
887*0b57cec5SDimitry Andric
// Mingw flavor of thiscall: no special sret handling (contrast with the
// Win variant below).
def CC_X86_32_ThisCall_Mingw : CallingConv<[
  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  CCDelegateTo<CC_X86_32_ThisCall_Common>
]>;
894*0b57cec5SDimitry Andric
// MSVC flavor of thiscall: sret goes on the stack so that ECX stays
// reserved for the "this" pointer.
def CC_X86_32_ThisCall_Win : CallingConv<[
  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  // Pass sret arguments indirectly through stack.
  CCIfSRet<CCAssignToStack<4, 4>>,

  CCDelegateTo<CC_X86_32_ThisCall_Common>
]>;
904*0b57cec5SDimitry Andric
// Dispatch thiscall by environment: Cygwin/MinGW targets use the Mingw
// variant, everything else the MSVC-compatible one.
def CC_X86_32_ThisCall : CallingConv<[
  CCIfSubtarget<"isTargetCygMing()", CCDelegateTo<CC_X86_32_ThisCall_Mingw>>,
  CCDelegateTo<CC_X86_32_ThisCall_Win>
]>;
909*0b57cec5SDimitry Andric
// CC_X86_32_FastCC - LLVM's internal 'fastcc' convention on X86-32.
def CC_X86_32_FastCC : CallingConv<[
  // Handles byval parameters.  Note that we can't rely on the delegation
  // to CC_X86_32_Common for this because that happens after code that
  // puts arguments in registers.
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i1/i8/i16/v1i1 arguments to i32.
  CCIfType<[i1, i8, i16, v1i1], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,

  // The first 3 float or double arguments, if the call is not a vararg
  // call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,

  // Doubles get 8-byte slots that are 8-byte aligned.
  // (This overrides the 4-byte alignment CC_X86_32_Common would use.)
  CCIfType<[f64], CCAssignToStack<8, 8>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
937*0b57cec5SDimitry Andric
// Calling convention for GHC (the Glasgow Haskell Compiler) on X86-32.
def CC_X86_32_GHC : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in STG registers: Base, Sp, Hp, R1
  CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
]>;
945*0b57cec5SDimitry Andric
// Calling convention for HiPE (High-Performance Erlang) on X86-32.
def CC_X86_32_HiPE : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2
  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX, ECX]>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>
]>;
957*0b57cec5SDimitry Andric
// X86-64 Intel OpenCL built-ins calling convention.  Integer arguments
// pick their registers by subtarget (Win64 vs. 64-bit SysV vs. 32-bit),
// vector arguments always use the first four XMM/YMM/ZMM registers, and
// leftovers delegate to the matching C convention at the bottom.
def CC_Intel_OCL_BI : CallingConv<[

  // On Win64, integers use the Win64 GPR argument registers.
  CCIfType<[i32], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[ECX, EDX, R8D, R9D]>>>,
  CCIfType<[i64], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[RCX, RDX, R8,  R9 ]>>>,

  // On other 64-bit targets, integers use the SysV argument registers.
  CCIfType<[i32], CCIfSubtarget<"is64Bit()", CCAssignToReg<[EDI, ESI, EDX, ECX]>>>,
  CCIfType<[i64], CCIfSubtarget<"is64Bit()", CCAssignToReg<[RDI, RSI, RDX, RCX]>>>,

  // On 32-bit targets (no rule above matched), i32 goes on the stack.
  CCIfType<[i32], CCAssignToStack<4, 4>>,

  // The SSE vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,

  // The 256-bit vector arguments are passed in YMM registers.
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,

  // The 512-bit vector arguments are passed in ZMM registers.
  CCIfType<[v16f32, v8f64, v16i32, v8i64],
           CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,

  // Pass masks in mask registers
  CCIfType<[v16i1, v8i1], CCAssignToReg<[K1]>>,

  // Anything left over follows the C convention for the subtarget.
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
  CCIfSubtarget<"is64Bit()",       CCDelegateTo<CC_X86_64_C>>,
  CCDelegateTo<CC_X86_32_C>
]>;
988*0b57cec5SDimitry Andric
989*0b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
990*0b57cec5SDimitry Andric// X86 Root Argument Calling Conventions
991*0b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
992*0b57cec5SDimitry Andric
993*0b57cec5SDimitry Andric// This is the root argument convention for the X86-32 backend.
994*0b57cec5SDimitry Andricdef CC_X86_32 : CallingConv<[
995*0b57cec5SDimitry Andric  // X86_INTR calling convention is valid in MCU target and should override the
996*0b57cec5SDimitry Andric  // MCU calling convention. Thus, this should be checked before isTargetMCU().
997*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,
998*0b57cec5SDimitry Andric  CCIfSubtarget<"isTargetMCU()", CCDelegateTo<CC_X86_32_MCU>>,
999*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
1000*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win32_VectorCall>>,
1001*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
1002*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
1003*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
1004*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_32_HiPE>>,
1005*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_32_RegCall>>,
1006*0b57cec5SDimitry Andric
1007*0b57cec5SDimitry Andric  // Otherwise, drop to normal X86-32 CC
1008*0b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32_C>
1009*0b57cec5SDimitry Andric]>;
1010*0b57cec5SDimitry Andric
1011*0b57cec5SDimitry Andric// This is the root argument convention for the X86-64 backend.
1012*0b57cec5SDimitry Andricdef CC_X86_64 : CallingConv<[
1013*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
1014*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
1015*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<CC_X86_64_WebKit_JS>>,
1016*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
1017*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::Win64", CCDelegateTo<CC_X86_Win64_C>>,
1018*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
1019*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,
1020*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::HHVM", CCDelegateTo<CC_X86_64_HHVM>>,
1021*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::HHVM_C", CCDelegateTo<CC_X86_64_HHVM_C>>,
1022*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_RegCall",
1023*0b57cec5SDimitry Andric    CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_RegCall>>>,
1024*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_SysV64_RegCall>>,
1025*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::X86_INTR", CCCustom<"CC_X86_Intr">>,
1026*0b57cec5SDimitry Andric
1027*0b57cec5SDimitry Andric  // Mingw64 and native Win64 use Win64 CC
1028*0b57cec5SDimitry Andric  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
1029*0b57cec5SDimitry Andric
1030*0b57cec5SDimitry Andric  // Otherwise, drop to normal X86-64 CC
1031*0b57cec5SDimitry Andric  CCDelegateTo<CC_X86_64_C>
1032*0b57cec5SDimitry Andric]>;
1033*0b57cec5SDimitry Andric
1034*0b57cec5SDimitry Andric// This is the argument convention used for the entire X86 backend.
1035*0b57cec5SDimitry Andriclet Entry = 1 in
1036*0b57cec5SDimitry Andricdef CC_X86 : CallingConv<[
1037*0b57cec5SDimitry Andric  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<CC_Intel_OCL_BI>>,
1038*0b57cec5SDimitry Andric  CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
1039*0b57cec5SDimitry Andric  CCDelegateTo<CC_X86_32>
1040*0b57cec5SDimitry Andric]>;
1041*0b57cec5SDimitry Andric
1042*0b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
1043*0b57cec5SDimitry Andric// Callee-saved Registers.
1044*0b57cec5SDimitry Andric//===----------------------------------------------------------------------===//
1045*0b57cec5SDimitry Andric
1046*0b57cec5SDimitry Andricdef CSR_NoRegs : CalleeSavedRegs<(add)>;
1047*0b57cec5SDimitry Andric
1048*0b57cec5SDimitry Andricdef CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
1049*0b57cec5SDimitry Andricdef CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;
1050*0b57cec5SDimitry Andric
1051*0b57cec5SDimitry Andricdef CSR_64_SwiftError : CalleeSavedRegs<(sub CSR_64, R12)>;
1052*0b57cec5SDimitry Andric
1053*0b57cec5SDimitry Andricdef CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
1054*0b57cec5SDimitry Andricdef CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;
1055*0b57cec5SDimitry Andric
1056*0b57cec5SDimitry Andricdef CSR_Win64_NoSSE : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15)>;
1057*0b57cec5SDimitry Andric
1058*0b57cec5SDimitry Andricdef CSR_Win64 : CalleeSavedRegs<(add CSR_Win64_NoSSE,
1059*0b57cec5SDimitry Andric                                     (sequence "XMM%u", 6, 15))>;
1060*0b57cec5SDimitry Andric
1061*0b57cec5SDimitry Andricdef CSR_Win64_SwiftError : CalleeSavedRegs<(sub CSR_Win64, R12)>;
1062*0b57cec5SDimitry Andric
1063*0b57cec5SDimitry Andric// The function used by Darwin to obtain the address of a thread-local variable
1064*0b57cec5SDimitry Andric// uses rdi to pass a single parameter and rax for the return value. All other
1065*0b57cec5SDimitry Andric// GPRs are preserved.
1066*0b57cec5SDimitry Andricdef CSR_64_TLS_Darwin : CalleeSavedRegs<(add CSR_64, RCX, RDX, RSI,
1067*0b57cec5SDimitry Andric                                             R8, R9, R10, R11)>;
1068*0b57cec5SDimitry Andric
1069*0b57cec5SDimitry Andric// CSRs that are handled by prologue, epilogue.
1070*0b57cec5SDimitry Andricdef CSR_64_CXX_TLS_Darwin_PE : CalleeSavedRegs<(add RBP)>;
1071*0b57cec5SDimitry Andric
1072*0b57cec5SDimitry Andric// CSRs that are handled explicitly via copies.
1073*0b57cec5SDimitry Andricdef CSR_64_CXX_TLS_Darwin_ViaCopy : CalleeSavedRegs<(sub CSR_64_TLS_Darwin, RBP)>;
1074*0b57cec5SDimitry Andric
1075*0b57cec5SDimitry Andric// All GPRs - except r11
1076*0b57cec5SDimitry Andricdef CSR_64_RT_MostRegs : CalleeSavedRegs<(add CSR_64, RAX, RCX, RDX, RSI, RDI,
1077*0b57cec5SDimitry Andric                                              R8, R9, R10, RSP)>;
1078*0b57cec5SDimitry Andric
1079*0b57cec5SDimitry Andric// All registers - except r11
1080*0b57cec5SDimitry Andricdef CSR_64_RT_AllRegs     : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
1081*0b57cec5SDimitry Andric                                                 (sequence "XMM%u", 0, 15))>;
1082*0b57cec5SDimitry Andricdef CSR_64_RT_AllRegs_AVX : CalleeSavedRegs<(add CSR_64_RT_MostRegs,
1083*0b57cec5SDimitry Andric                                                 (sequence "YMM%u", 0, 15))>;
1084*0b57cec5SDimitry Andric
1085*0b57cec5SDimitry Andricdef CSR_64_MostRegs : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
1086*0b57cec5SDimitry Andric                                           R11, R12, R13, R14, R15, RBP,
1087*0b57cec5SDimitry Andric                                           (sequence "XMM%u", 0, 15))>;
1088*0b57cec5SDimitry Andric
1089*0b57cec5SDimitry Andricdef CSR_32_AllRegs     : CalleeSavedRegs<(add EAX, EBX, ECX, EDX, EBP, ESI,
1090*0b57cec5SDimitry Andric                                              EDI)>;
1091*0b57cec5SDimitry Andricdef CSR_32_AllRegs_SSE : CalleeSavedRegs<(add CSR_32_AllRegs,
1092*0b57cec5SDimitry Andric                                              (sequence "XMM%u", 0, 7))>;
1093*0b57cec5SDimitry Andricdef CSR_32_AllRegs_AVX : CalleeSavedRegs<(add CSR_32_AllRegs,
1094*0b57cec5SDimitry Andric                                              (sequence "YMM%u", 0, 7))>;
1095*0b57cec5SDimitry Andricdef CSR_32_AllRegs_AVX512 : CalleeSavedRegs<(add CSR_32_AllRegs,
1096*0b57cec5SDimitry Andric                                                 (sequence "ZMM%u", 0, 7),
1097*0b57cec5SDimitry Andric                                                 (sequence "K%u", 0, 7))>;
1098*0b57cec5SDimitry Andric
1099*0b57cec5SDimitry Andricdef CSR_64_AllRegs     : CalleeSavedRegs<(add CSR_64_MostRegs, RAX)>;
1100*0b57cec5SDimitry Andricdef CSR_64_AllRegs_NoSSE : CalleeSavedRegs<(add RAX, RBX, RCX, RDX, RSI, RDI, R8, R9,
1101*0b57cec5SDimitry Andric                                                R10, R11, R12, R13, R14, R15, RBP)>;
1102*0b57cec5SDimitry Andricdef CSR_64_AllRegs_AVX : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
1103*0b57cec5SDimitry Andric                                                   (sequence "YMM%u", 0, 15)),
1104*0b57cec5SDimitry Andric                                              (sequence "XMM%u", 0, 15))>;
1105*0b57cec5SDimitry Andricdef CSR_64_AllRegs_AVX512 : CalleeSavedRegs<(sub (add CSR_64_MostRegs, RAX,
1106*0b57cec5SDimitry Andric                                                      (sequence "ZMM%u", 0, 31),
1107*0b57cec5SDimitry Andric                                                      (sequence "K%u", 0, 7)),
1108*0b57cec5SDimitry Andric                                                 (sequence "XMM%u", 0, 15))>;
1109*0b57cec5SDimitry Andric
1110*0b57cec5SDimitry Andric// Standard C + YMM6-15
1111*0b57cec5SDimitry Andricdef CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
1112*0b57cec5SDimitry Andric                                                  R13, R14, R15,
1113*0b57cec5SDimitry Andric                                                  (sequence "YMM%u", 6, 15))>;
1114*0b57cec5SDimitry Andric
1115*0b57cec5SDimitry Andricdef CSR_Win64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI,
1116*0b57cec5SDimitry Andric                                                     R12, R13, R14, R15,
1117*0b57cec5SDimitry Andric                                                     (sequence "ZMM%u", 6, 21),
1118*0b57cec5SDimitry Andric                                                     K4, K5, K6, K7)>;
1119*0b57cec5SDimitry Andric//Standard C + XMM 8-15
1120*0b57cec5SDimitry Andricdef CSR_64_Intel_OCL_BI       : CalleeSavedRegs<(add CSR_64,
1121*0b57cec5SDimitry Andric                                                 (sequence "XMM%u", 8, 15))>;
1122*0b57cec5SDimitry Andric
1123*0b57cec5SDimitry Andric//Standard C + YMM 8-15
1124*0b57cec5SDimitry Andricdef CSR_64_Intel_OCL_BI_AVX    : CalleeSavedRegs<(add CSR_64,
1125*0b57cec5SDimitry Andric                                                  (sequence "YMM%u", 8, 15))>;
1126*0b57cec5SDimitry Andric
1127*0b57cec5SDimitry Andricdef CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RDI, RSI, R14, R15,
1128*0b57cec5SDimitry Andric                                                  (sequence "ZMM%u", 16, 31),
1129*0b57cec5SDimitry Andric                                                  K4, K5, K6, K7)>;
1130*0b57cec5SDimitry Andric
1131*0b57cec5SDimitry Andric// Only R12 is preserved for PHP calls in HHVM.
1132*0b57cec5SDimitry Andricdef CSR_64_HHVM : CalleeSavedRegs<(add R12)>;
1133*0b57cec5SDimitry Andric
1134*0b57cec5SDimitry Andric// Register calling convention preserves few GPR and XMM8-15
1135*0b57cec5SDimitry Andricdef CSR_32_RegCall_NoSSE : CalleeSavedRegs<(add ESI, EDI, EBX, EBP, ESP)>;
1136*0b57cec5SDimitry Andricdef CSR_32_RegCall       : CalleeSavedRegs<(add CSR_32_RegCall_NoSSE,
1137*0b57cec5SDimitry Andric                                           (sequence "XMM%u", 4, 7))>;
1138*0b57cec5SDimitry Andricdef CSR_Win64_RegCall_NoSSE : CalleeSavedRegs<(add RBX, RBP, RSP,
1139*0b57cec5SDimitry Andric                                              (sequence "R%u", 10, 15))>;
1140*0b57cec5SDimitry Andricdef CSR_Win64_RegCall       : CalleeSavedRegs<(add CSR_Win64_RegCall_NoSSE,
1141*0b57cec5SDimitry Andric                                              (sequence "XMM%u", 8, 15))>;
1142*0b57cec5SDimitry Andricdef CSR_SysV64_RegCall_NoSSE : CalleeSavedRegs<(add RBX, RBP, RSP,
1143*0b57cec5SDimitry Andric                                               (sequence "R%u", 12, 15))>;
1144*0b57cec5SDimitry Andricdef CSR_SysV64_RegCall       : CalleeSavedRegs<(add CSR_SysV64_RegCall_NoSSE,
1145*0b57cec5SDimitry Andric                                               (sequence "XMM%u", 8, 15))>;
1146*0b57cec5SDimitry Andric
1147