//=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the AArch64 architecture.
//
//===----------------------------------------------------------------------===//

/// CCIfBigEndian - Match only if we're in big-endian mode.
class CCIfBigEndian<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().isBigEndian()", A>;

/// CCIfILP32 - Match only if the pointer size is 4 bytes (ILP32 targets).
class CCIfILP32<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().getPointerSize() == 4", A>;

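// Illustrative sketch, not part of the upstream file: CCIf simply wraps an
// arbitrary C++ predicate over the current CCState, so further guards can be
// built the same way as the two helpers above. For example, a hypothetical
// little-endian guard would just negate the predicate used by CCIfBigEndian:
//
//   class CCIfLittleEndian<CCAction A> :
//     CCIf<"!State.getMachineFunction().getDataLayout().isBigEndian()", A>;
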
//===----------------------------------------------------------------------===//
// ARM AAPCS64 Calling Convention
//===----------------------------------------------------------------------===//

let Entry = 1 in
def CC_AArch64_AAPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v4bf16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v8bf16, v16i8],
                         CCBitConvertToType<f128>>>,

  // In AAPCS, an SRet is passed in X8, not X0 like a normal pointer parameter.
  // However, on Windows, in some circumstances, the SRet is passed in X0 or X1
  // instead.  The presence of the inreg attribute indicates that SRet is
  // passed in the alternative register (X0 or X1), not X8:
  // - X0 for non-instance methods.
  // - X1 for instance methods.

  // The "sret" attribute identifies indirect returns.
  // The "inreg" attribute identifies non-aggregate types.
  // The position of the "sret" attribute identifies instance/non-instance
  // methods.
  // "sret" on argument 0 means non-instance methods.
  // "sret" on argument 1 means instance methods.

  CCIfInReg<CCIfType<[i64],
    CCIfSRet<CCIfType<[i64], CCAssignToReg<[X0, X1]>>>>>,

  CCIfSRet<CCIfType<[i64], CCAssignToReg<[X8]>>>,

  // Put ByVal arguments directly on the stack. The minimum size and alignment
  // of a slot is 64 bits.
  CCIfByVal<CCPassByVal<8, 8>>,

  // The 'nest' parameter, if any, is passed in X18.
  // Darwin uses X18 as the platform register and hence 'nest' isn't currently
  // supported there.
  CCIfNest<CCAssignToReg<[X18]>>,

  // Pass SwiftSelf in a callee-saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[X20]>>>,

  // A SwiftError is passed in X21.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // Pass SwiftAsync in an otherwise callee-saved register so that it will be
  // preserved for normal function calls.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[X22]>>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,
  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCPassIndirect<i64>>,

  CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
           CCAssignToReg<[P0, P1, P2, P3]>>,
  CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
           CCPassIndirect<i64>>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR. (A worked example follows this
  // definition.)
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  // i128 is split into two i64s and must start at an even register; it can't
  // start at X7 because the second half would not fit in a register.
  CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
                                                    [X0, X1, X3, X5]>>>,

  // i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[bf16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more arguments arrive than will fit in registers, pass them on the
  // stack instead.
  CCIfType<[i1, i8, i16, f16, bf16], CCAssignToStack<8, 8>>,
  CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;
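
// For illustration only (not part of the upstream file): assuming a
// hypothetical C prototype
//
//   void f(int a, __int128 b, double c, long d);
//
// the rules above assign a to W0, split b into two i64 halves placed in the
// even/odd pair X2/X3 (X1 is skipped so the pair starts at an even register),
// put c in D0 and d in X4. A ninth integer or ninth floating-point argument
// would instead be given an 8-byte stack slot.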

let Entry = 1 in
def RetCC_AArch64_AAPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v4bf16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v8bf16, v16i8],
                         CCBitConvertToType<f128>>>,

  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[bf16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
      CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
      CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,

  CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
           CCAssignToReg<[P0, P1, P2, P3]>>
]>;

// Vararg functions on Windows pass floats in integer registers.
let Entry = 1 in
def CC_AArch64_Win64_VarArg : CallingConv<[
  CCIfType<[f16, bf16], CCBitConvertToType<i16>>,
  CCIfType<[f32], CCBitConvertToType<i32>>,
  CCIfType<[f64], CCBitConvertToType<i64>>,
  CCDelegateTo<CC_AArch64_AAPCS>
]>;
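
// For illustration only (not part of the upstream file): under this
// convention a variadic call such as printf("%f", 1.0) bit-converts the f64
// argument to i64, so after the format-string pointer lands in X0 the double
// is passed in X1 rather than in D0, matching the Windows variadic ABI.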

// Vararg functions under the Arm64EC ABI use a different convention, with
// a stack layout compatible with the x64 calling convention.
let Entry = 1 in
def CC_AArch64_Arm64EC_VarArg : CallingConv<[
  // Convert small floating-point values to integer.
  CCIfType<[f16, bf16], CCBitConvertToType<i16>>,
  CCIfType<[f32], CCBitConvertToType<i32>>,
  CCIfType<[f64, v1f64, v1i64, v2f32, v2i32, v4i16, v4f16, v4bf16, v8i8, iPTR],
           CCBitConvertToType<i64>>,

  // Larger floating-point/vector values are passed indirectly. (A worked
  // example follows this definition.)
  CCIfType<[f128, v2f64, v2i64, v4i32, v4f32, v8i16, v8f16, v8bf16, v16i8],
           CCPassIndirect<i64>>,
  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCPassIndirect<i64>>,
  CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1],
           CCPassIndirect<i64>>,

  // Handle SRet. See the comment in CC_AArch64_AAPCS.
  CCIfInReg<CCIfType<[i64],
    CCIfSRet<CCIfType<[i64], CCAssignToReg<[X0, X1]>>>>>,
  CCIfSRet<CCIfType<[i64], CCAssignToReg<[X8]>>>,

  // Put ByVal arguments directly on the stack. The minimum size and alignment
  // of a slot is 64 bits. (Shouldn't normally come up; the Microsoft ABI
  // doesn't use byval.)
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote small integers to i32.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  // Pass the first four arguments in X0-X3.
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3]>>,
  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3]>>,

  // Put the remaining arguments on the stack.
  CCIfType<[i32, i64], CCAssignToStack<8, 8>>,
]>;
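
// For illustration only (not part of the upstream file): with the rules
// above, a 16-byte vector vararg such as a v4i32 is not passed in a Q
// register; the caller stores it to memory and passes a pointer to it as an
// i64, mirroring how the x64 calling convention passes oversized variadic
// arguments by reference.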

// Windows Control Flow Guard checks take a single argument (the target function
// address) and have no return value.
let Entry = 1 in
def CC_AArch64_Win64_CFGuard_Check : CallingConv<[
  CCIfType<[i64], CCAssignToReg<[X15]>>
]>;


// Darwin uses a calling convention that differs in only two ways
// from the standard one at this level:
//     + i128s (i.e. split i64s) don't need even registers.
//     + Stack slots are sized as needed rather than being at least 64-bit.
//       (A worked example follows this definition.)
let Entry = 1 in
def CC_AArch64_DarwinPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToReg<[X8]>>>,

  // Put ByVal arguments directly on the stack. The minimum size and alignment
  // of a slot is 64 bits.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Pass SwiftSelf in a callee-saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[X20]>>>,

  // A SwiftError is passed in X21.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // Pass SwiftAsync in an otherwise callee-saved register so that it will be
  // preserved for normal function calls.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[X22]>>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  // i128 is split into two i64s; the first half can use any register up to X6,
  // but not X7, because the second half would not fit in a register.
  CCIfType<[i64],
           CCIfSplit<CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6]>>>,
  // i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[bf16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more arguments arrive than will fit in registers, pass them on the
  // stack instead.
  CCIf<"ValVT == MVT::i1 || ValVT == MVT::i8", CCAssignToStack<1, 1>>,
  CCIf<"ValVT == MVT::i16 || ValVT == MVT::f16 || ValVT == MVT::bf16",
       CCAssignToStack<2, 2>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Re-demote pointers to 32 bits so we don't end up storing 64-bit
  // values and clobbering neighbouring stack locations. Not very pretty.
  CCIfPtr<CCIfILP32<CCTruncToType<i32>>>,
  CCIfPtr<CCIfILP32<CCAssignToStack<4, 4>>>,

  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;
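
// For illustration only (not part of the upstream file): once the argument
// registers are exhausted, a char argument occupies a 1-byte stack slot and a
// short a 2-byte slot under this convention, whereas CC_AArch64_AAPCS above
// would give each of them a full 8-byte slot.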

let Entry = 1 in
def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Stack_Block">>,

  // Handle all scalar types as either i64 or f64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  CCIfType<[f16, bf16, f32], CCPromoteToType<f64>>,

  // Everything is on the stack.
  // i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;

// In the ILP32 world, the minimum stack slot size is 4 bytes. Otherwise this
// is the same as the normal Darwin VarArgs handling. (A worked example
// follows this definition.)
let Entry = 1 in
def CC_AArch64_DarwinPCS_ILP32_VarArg : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // Handle all scalar types as either i32 or f32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,
  CCIfType<[f16, bf16], CCPromoteToType<f32>>,

  // Everything is on the stack.
  // i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfPtr<CCIfILP32<CCTruncToType<i32>>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;
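
// For illustration only (not part of the upstream file): on an ILP32
// (arm64_32) target, a variadic pointer argument is truncated back to i32 by
// the CCIfPtr/CCIfILP32 rule above and takes a 4-byte stack slot, whereas the
// LP64 Darwin vararg convention stores it as a full 8-byte i64 slot.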


// The WebKit_JS calling convention passes only the first argument (the
// callee) in a register; the remaining arguments go on the stack. We allow
// 32-bit stack slots so that WebKit can write partial values to the stack and
// define the other 32-bit quantity as undef. (A worked example follows this
// definition.)
let Entry = 1 in
def CC_AArch64_WebKit_JS : CallingConv<[
  // Handle i1, i8, i16, i32, and i64 passing in register X0 (W0).
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0]>>,
  CCIfType<[i64], CCAssignToReg<[X0]>>,

  // Pass the remaining arguments on the stack instead.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;
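
// For illustration only (not part of the upstream file): for a call with an
// i64 callee followed by two i32 arguments and one f64 argument, the callee
// goes in X0 and the remaining arguments occupy stack slots of 4, 4 and 8
// bytes respectively; none of them are assigned to registers.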

let Entry = 1 in
def RetCC_AArch64_WebKit_JS : CallingConv<[
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>
]>;

//===----------------------------------------------------------------------===//
// ARM64 Calling Convention for GHC
//===----------------------------------------------------------------------===//

// This calling convention is specific to the Glasgow Haskell Compiler.
// The only documentation is the GHC source code, specifically the C header
// file:
//
//     https://github.com/ghc/ghc/blob/master/includes/stg/MachRegs.h
//
// which defines the registers for the Spineless Tagless G-Machine (STG) that
// GHC uses to implement lazy evaluation. The generic STG machine has a set of
// registers which are mapped to an appropriate set of architecture-specific
// registers for each CPU architecture.
//
// The STG Machine is documented here:
//
//    https://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/GeneratedCode
//
// The AArch64 register mapping is under the heading "The ARMv8/AArch64 ABI
// register mapping".

let Entry = 1 in
def CC_AArch64_GHC : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,

  // Handle all vector types as either f64 or v2f64.
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, f128], CCBitConvertToType<v2f64>>,

  CCIfType<[v2f64], CCAssignToReg<[Q4, Q5]>>,
  CCIfType<[f32], CCAssignToReg<[S8, S9, S10, S11]>>,
  CCIfType<[f64], CCAssignToReg<[D12, D13, D14, D15]>>,

  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim.
  CCIfType<[i64], CCAssignToReg<[X19, X20, X21, X22, X23, X24, X25, X26, X27, X28]>>
]>;

// The order of the callee-saves in this file is important, because the
// FrameLowering code will use this order to determine the layout of the
// callee-save area in the stack frame. As can be observed below, Darwin
// requires the frame record (LR, FP) to be at the top of the callee-save
// area, whereas on other platforms it is at the bottom.

// FIXME: LR is only callee-saved in the sense that *we* preserve it and are
// presumably a callee to someone. External functions may not do so, but this
// is currently safe since BL has LR as an implicit-def and what happens after a
// tail call doesn't matter.
//
// It would be better to model its preservation semantics properly (create a
// vreg on entry, use it in RET & tail call generation; make that vreg def if we
// end up saving LR as part of a call frame). Watch this space...
def CSR_AArch64_AAPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                           X25, X26, X27, X28, LR, FP,
                                           D8,  D9,  D10, D11,
                                           D12, D13, D14, D15)>;
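
// Illustrative sketch, not part of the upstream file: new callee-saved lists
// are composed from existing ones with the dag operators add, sub and
// sequence, as the variants below do. For example, a hypothetical list that
// also preserved X9-X11 but gave up X28 could be written as:
//
//   def CSR_AArch64_Hypothetical
//       : CalleeSavedRegs<(add (sub CSR_AArch64_AAPCS, X28),
//                              (sequence "X%u", 9, 11))>;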

// A variant for treating X18 as callee-saved, when interfacing with
// code that needs X18 to be preserved.
def CSR_AArch64_AAPCS_X18 : CalleeSavedRegs<(add X18, CSR_AArch64_AAPCS)>;

// Win64 has unwinding codes for an (FP,LR) pair, save_fplr and save_fplr_x.
// We put FP before LR, so that frame lowering logic generates (FP,LR) pairs,
// and not (LR,FP) pairs.
def CSR_Win_AArch64_AAPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                               X25, X26, X27, X28, FP, LR,
                                               D8, D9, D10, D11,
                                               D12, D13, D14, D15)>;

def CSR_Win_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_Win_AArch64_AAPCS, X21)>;

def CSR_Win_AArch64_AAPCS_SwiftTail
    : CalleeSavedRegs<(sub CSR_Win_AArch64_AAPCS, X20, X22)>;

// The Control Flow Guard check call uses a custom calling convention that also
// preserves X0-X8 and Q0-Q7.
def CSR_Win_AArch64_CFGuard_Check : CalleeSavedRegs<(add CSR_Win_AArch64_AAPCS,
                                               (sequence "X%u", 0, 8),
                                               (sequence "Q%u", 0, 7))>;

// The AArch64 PCS for vector functions (VPCS)
// must (additionally) preserve the full Q8-Q23 registers.
def CSR_AArch64_AAVPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                          X25, X26, X27, X28, LR, FP,
                                          (sequence "Q%u", 8, 23))>;

// Functions taking SVE arguments or returning an SVE type
// must (additionally) preserve the full Z8-Z23 and the predicate registers P4-P15.
def CSR_AArch64_SVE_AAPCS : CalleeSavedRegs<(add (sequence "Z%u", 8, 23),
                                                 (sequence "P%u", 4, 15),
                                                 X19, X20, X21, X22, X23, X24,
                                                 X25, X26, X27, X28, LR, FP)>;

// SME ABI support routines such as __arm_tpidr2_save/restore preserve most registers.
def CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0
                          : CalleeSavedRegs<(add (sequence "Z%u", 0, 31),
                                                 (sequence "P%u", 0, 15),
                                                 (sequence "X%u", 0, 13),
                                                 (sequence "X%u", 19, 28),
                                                 LR, FP)>;

// The SME ABI support routine __arm_sme_state preserves most registers.
def CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2
                          : CalleeSavedRegs<(add (sequence "Z%u", 0, 31),
                                                 (sequence "P%u", 0, 15),
                                                 (sequence "X%u", 2, 15),
                                                 (sequence "X%u", 19, 28),
                                                 LR, FP)>;

// The SMSTART/SMSTOP instructions preserve only the GPRs.
def CSR_AArch64_SMStartStop : CalleeSavedRegs<(add (sequence "X%u", 0, 28),
                                                   LR, FP)>;

def CSR_AArch64_AAPCS_SwiftTail
    : CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X20, X22)>;

// Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
// 'this' and the pointer return value are both passed in X0 in these cases,
// this can be partially modelled by treating X0 as a callee-saved register;
// only the resulting RegMask is used; the SaveList is ignored.
//
// (For generic ARM 64-bit ABI code, clang will not generate constructors or
// destructors with 'this' returns, so this RegMask will not be used in that
// case.)
def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;

def CSR_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X21)>;

// The ELF stub used for TLS-descriptor access saves every feasible
// register. Only X0 and LR are clobbered.
def CSR_AArch64_TLS_ELF
    : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_AllRegs
    : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
                           (sequence "X%u", 0, 28), FP, LR, SP,
                           (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
                           (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_NoRegs : CalleeSavedRegs<(add)>;

def CSR_AArch64_RT_MostRegs :  CalleeSavedRegs<(add CSR_AArch64_AAPCS,
                                                (sequence "X%u", 9, 15))>;

def CSR_AArch64_RT_AllRegs :  CalleeSavedRegs<(add CSR_AArch64_RT_MostRegs,
                                                (sequence "Q%u", 8, 31))>;

def CSR_AArch64_StackProbe_Windows
    : CalleeSavedRegs<(add (sequence "X%u", 0, 15),
                           (sequence "X%u", 18, 28), FP, SP,
                           (sequence "Q%u", 0, 31))>;

// Darwin variants of AAPCS.
// Darwin puts the frame record at the top of the callee-save area.
def CSR_Darwin_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
                                                X23, X24, X25, X26, X27, X28,
                                                D8,  D9,  D10, D11,
                                                D12, D13, D14, D15)>;

def CSR_Darwin_AArch64_AAVPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21,
                                                 X22, X23, X24, X25, X26, X27,
                                                 X28, (sequence "Q%u", 8, 23))>;

// For the Windows calling convention on a non-Windows OS, where X18 is treated
// as reserved, back up X18 when entering non-Windows code (marked with the
// Windows calling convention) and restore it when returning, regardless of
// whether the individual function uses it - it might call other functions
// that clobber it.
def CSR_Darwin_AArch64_AAPCS_Win64
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS, X18)>;

def CSR_Darwin_AArch64_AAPCS_ThisReturn
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS, X0)>;

def CSR_Darwin_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_AAPCS, X21)>;

def CSR_Darwin_AArch64_AAPCS_SwiftTail
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_AAPCS, X20, X22)>;

// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. X16 and X17 are used on the
// fast path for calculation, but all other registers except X0 (argument/return)
// and LR (it is a call, after all) are preserved.
def CSR_Darwin_AArch64_TLS
    : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
                           FP,
                           (sequence "Q%u", 0, 31))>;

// We can only handle a register pair with adjacent registers; the register pair
// should belong to the same class as well. Since the access function on the
// fast path calls a function that follows CSR_Darwin_AArch64_TLS,
// CSR_Darwin_AArch64_CXX_TLS should be a subset of CSR_Darwin_AArch64_TLS.
def CSR_Darwin_AArch64_CXX_TLS
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS,
                           (sub (sequence "X%u", 1, 28), X9, X15, X16, X17, X18, X19),
                           (sequence "D%u", 0, 31))>;

// CSRs that are handled by the prologue and epilogue.
def CSR_Darwin_AArch64_CXX_TLS_PE
    : CalleeSavedRegs<(add LR, FP)>;

// CSRs that are handled explicitly via copies.
def CSR_Darwin_AArch64_CXX_TLS_ViaCopy
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_CXX_TLS, LR, FP)>;

def CSR_Darwin_AArch64_RT_MostRegs
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS, (sequence "X%u", 9, 15))>;

def CSR_Darwin_AArch64_RT_AllRegs
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_RT_MostRegs, (sequence "Q%u", 8, 31))>;

// Variants of the standard calling conventions for the shadow call stack.
// These all preserve X18 in addition to any other registers.
def CSR_AArch64_NoRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_NoRegs, X18)>;
def CSR_AArch64_AllRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AllRegs, X18)>;
def CSR_AArch64_AAPCS_SwiftError_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS_SwiftError, X18)>;
def CSR_AArch64_RT_MostRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_RT_MostRegs, X18)>;
def CSR_AArch64_RT_AllRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_RT_AllRegs, X18)>;
def CSR_AArch64_AAVPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAVPCS, X18)>;
def CSR_AArch64_SVE_AAPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_SVE_AAPCS, X18)>;
def CSR_AArch64_AAPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X18)>;