//=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the AArch64 architecture.
//
//===----------------------------------------------------------------------===//

/// CCIfBigEndian - Match only if we're in big endian mode.
class CCIfBigEndian<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().isBigEndian()", A>;

/// CCIfILP32 - Match only if the pointer size is 32 bits (ILP32 / arm64_32).
class CCIfILP32<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().getPointerSize() == 4", A>;


//===----------------------------------------------------------------------===//
// ARM AAPCS64 Calling Convention
//===----------------------------------------------------------------------===//

let Entry = 1 in
def CC_AArch64_AAPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v4bf16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v8bf16, v16i8],
                         CCBitConvertToType<f128>>>,

  // In AAPCS, an SRet is passed in X8, not X0 like a normal pointer parameter.
  // However, on Windows, in some circumstances, the SRet is passed in X0 or X1
  // instead. The presence of the inreg attribute indicates that SRet is
  // passed in the alternative register (X0 or X1), not X8:
  // - X0 for non-instance methods.
  // - X1 for instance methods.

  // The "sret" attribute identifies indirect returns.
  // The "inreg" attribute identifies non-aggregate types.
  // The position of the "sret" attribute identifies instance/non-instance
  // methods.
  // "sret" on argument 0 means non-instance methods.
  // "sret" on argument 1 means instance methods.

  CCIfInReg<CCIfType<[i64],
            CCIfSRet<CCIfType<[i64], CCAssignToReg<[X0, X1]>>>>>,

  CCIfSRet<CCIfType<[i64], CCAssignToReg<[X8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // The 'nest' parameter, if any, is passed in X18.
  // Darwin uses X18 as the platform register and hence 'nest' isn't currently
  // supported there.
  CCIfNest<CCAssignToReg<[X18]>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[X20]>>>,

  // A SwiftError is passed in X21.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // Pass SwiftAsync in an otherwise callee saved register so that it will be
  // preserved for normal function calls.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[X22]>>>,
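
  // Arguments that must occupy consecutive registers (for example the parts of
  // a homogeneous floating-point or short-vector aggregate) are grouped and
  // handed to the custom routine CC_AArch64_Custom_Block (see
  // AArch64CallingConvention.cpp), which assigns the whole block to
  // consecutive registers or, failing that, entirely to the stack.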
  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Scalable vector arguments are passed in Z0-Z7 and scalable predicate
  // arguments in P0-P3; once those run out, the argument is passed indirectly
  // (an i64 pointer to a copy is passed instead).
  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,
  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCPassIndirect<i64>>,

  CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1],
           CCAssignToReg<[P0, P1, P2, P3]>>,
  CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1],
           CCPassIndirect<i64>>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  // An i128 is split into two i64s, which must go in an even/odd register
  // pair; we can't put the first half in X7, as the second half would not fit
  // in a register.
  CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
                                                    [X0, X1, X3, X5]>>>,

  // An i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[bf16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  CCIfType<[i1, i8, i16, f16, bf16], CCAssignToStack<8, 8>>,
  CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;
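
// For illustration only (a sketch of the net effect of the rules above, not a
// restatement of the AAPCS): given
//   void f(int a, double b, __int128 c, float d);
// 'a' is assigned to W0, 'b' to D0, 'c' to the even-aligned pair X2/X3 (X1 is
// skipped via the shadow registers), and 'd' to S1.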

let Entry = 1 in
def RetCC_AArch64_AAPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v4bf16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v8bf16, v16i8],
                         CCBitConvertToType<f128>>>,

  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[bf16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,

  CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1],
           CCAssignToReg<[P0, P1, P2, P3]>>
]>;

// Vararg functions on Windows pass floats in integer registers.
let Entry = 1 in
def CC_AArch64_Win64_VarArg : CallingConv<[
  CCIfType<[f16, bf16], CCBitConvertToType<i16>>,
  CCIfType<[f32], CCBitConvertToType<i32>>,
  CCIfType<[f64], CCBitConvertToType<i64>>,
  CCDelegateTo<CC_AArch64_AAPCS>
]>;

// Windows Control Flow Guard checks take a single argument (the target function
// address) and have no return value.
let Entry = 1 in
def CC_AArch64_Win64_CFGuard_Check : CallingConv<[
  CCIfType<[i64], CCAssignToReg<[X15]>>
]>;


// Darwin uses a calling convention which differs in only two ways
// from the standard one at this level:
//   + i128s (i.e. split i64s) don't need even registers.
//   + Stack slots are sized as needed rather than being at least 64-bit.
let Entry = 1 in
def CC_AArch64_DarwinPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToReg<[X8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[X20]>>>,

  // A SwiftError is passed in X21.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // Pass SwiftAsync in an otherwise callee saved register so that it will be
  // preserved for normal function calls.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[X22]>>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  // An i128 is split into two i64s; we can't put the first half in X7, as the
  // second half would not fit in a register.
  CCIfType<[i64],
           CCIfSplit<CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6]>>>,
  // An i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[bf16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  CCIf<"ValVT == MVT::i1 || ValVT == MVT::i8", CCAssignToStack<1, 1>>,
  CCIf<"ValVT == MVT::i16 || ValVT == MVT::f16 || ValVT == MVT::bf16",
       CCAssignToStack<2, 2>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Re-demote pointers to 32 bits so we don't end up storing 64-bit
  // values and clobbering neighbouring stack locations. Not very pretty.
  CCIfPtr<CCIfILP32<CCTruncToType<i32>>>,
  CCIfPtr<CCIfILP32<CCAssignToStack<4, 4>>>,

  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;

let Entry = 1 in
def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Stack_Block">>,

  // Handle all scalar types as either i64 or f64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  CCIfType<[f16, bf16, f32], CCPromoteToType<f64>>,

  // Everything is on the stack.
  // An i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;

// In the ILP32 world, the minimum stack slot size is 4 bytes. Otherwise the
// same as the normal Darwin VarArgs handling.
let Entry = 1 in
def CC_AArch64_DarwinPCS_ILP32_VarArg : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // Handle all scalar types as either i32 or f32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,
  CCIfType<[f16, bf16], CCPromoteToType<f32>>,

  // Everything is on the stack.
  // An i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfPtr<CCIfILP32<CCTruncToType<i32>>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;
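
// For illustration only: under the Darwin vararg conventions above, an
// anonymous (variadic) double is never assigned to D0-D7; it is placed in an
// 8-byte stack slot (a 4-byte slot for i32/f32 under ILP32), which is where
// Darwin's char*-based va_list expects to find it.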

// The WebKit_JS calling convention passes only the first argument (the
// callee) in a register and the remaining arguments on the stack. We allow
// 32-bit stack slots so that WebKit can write partial values to the stack and
// define the other 32-bit quantity as undef.
let Entry = 1 in
def CC_AArch64_WebKit_JS : CallingConv<[
  // Handle i1, i8, i16, i32, and i64 by passing them in register X0 (W0).
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0]>>,
  CCIfType<[i64], CCAssignToReg<[X0]>>,

  // Pass the remaining arguments on the stack instead.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;

let Entry = 1 in
def RetCC_AArch64_WebKit_JS : CallingConv<[
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>
]>;

//===----------------------------------------------------------------------===//
// ARM64 Calling Convention for GHC
//===----------------------------------------------------------------------===//

// This calling convention is specific to the Glasgow Haskell Compiler.
// The only documentation is the GHC source code, specifically the C header
// file:
//
//     https://github.com/ghc/ghc/blob/master/includes/stg/MachRegs.h
//
// which defines the registers for the Spineless Tagless G-Machine (STG) that
// GHC uses to implement lazy evaluation. The generic STG machine has a set of
// registers, which are mapped to an appropriate set of architecture-specific
// registers for each CPU architecture.
//
// The STG Machine is documented here:
//
//     https://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/GeneratedCode
//
// The AArch64 register mapping is under the heading "The ARMv8/AArch64 ABI
// register mapping".

let Entry = 1 in
def CC_AArch64_GHC : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,

  // Handle all vector types as either f64 or v2f64.
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, f128], CCBitConvertToType<v2f64>>,

  CCIfType<[v2f64], CCAssignToReg<[Q4, Q5]>>,
  CCIfType<[f32], CCAssignToReg<[S8, S9, S10, S11]>>,
  CCIfType<[f64], CCAssignToReg<[D12, D13, D14, D15]>>,

  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64], CCAssignToReg<[X19, X20, X21, X22, X23, X24, X25, X26, X27, X28]>>
]>;

// The order of the callee-saves in this file is important, because the
// FrameLowering code will use this order to determine the layout of the
// callee-save area in the stack frame. As can be observed below, Darwin
// requires the frame-record (LR, FP) to be at the top of the callee-save area,
// whereas on other platforms it is at the bottom.

// FIXME: LR is only callee-saved in the sense that *we* preserve it and are
// presumably a callee to someone. External functions may not do so, but this
// is currently safe since BL has LR as an implicit-def and what happens after a
// tail call doesn't matter.
//
// It would be better to model its preservation semantics properly (create a
// vreg on entry, use it in RET & tail call generation; make that vreg def if we
// end up saving LR as part of a call frame). Watch this space...
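
// The standard AAPCS callee-saved set: X19-X28 plus the frame record (FP, LR)
// and the low 64 bits of v8-v15 (hence D8-D15 rather than the full Q
// registers).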
def CSR_AArch64_AAPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                             X25, X26, X27, X28, LR, FP,
                                             D8, D9, D10, D11,
                                             D12, D13, D14, D15)>;

// A variant for treating X18 as callee saved, when interfacing with
// code that needs X18 to be preserved.
def CSR_AArch64_AAPCS_X18 : CalleeSavedRegs<(add X18, CSR_AArch64_AAPCS)>;

// Win64 has unwinding codes for an (FP,LR) pair, save_fplr and save_fplr_x.
// We put FP before LR, so that frame lowering logic generates (FP,LR) pairs,
// and not (LR,FP) pairs.
def CSR_Win_AArch64_AAPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                                 X25, X26, X27, X28, FP, LR,
                                                 D8, D9, D10, D11,
                                                 D12, D13, D14, D15)>;

// The Control Flow Guard check call uses a custom calling convention that also
// preserves X0-X8 and Q0-Q7.
def CSR_Win_AArch64_CFGuard_Check : CalleeSavedRegs<(add CSR_Win_AArch64_AAPCS,
                                                         (sequence "X%u", 0, 8),
                                                         (sequence "Q%u", 0, 7))>;

// AArch64 PCS for vector functions (VPCS)
// must (additionally) preserve full Q8-Q23 registers
def CSR_AArch64_AAVPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                              X25, X26, X27, X28, LR, FP,
                                              (sequence "Q%u", 8, 23))>;

// Functions taking SVE arguments or returning an SVE type
// must (additionally) preserve full Z8-Z23 and predicate registers P4-P15
def CSR_AArch64_SVE_AAPCS : CalleeSavedRegs<(add (sequence "Z%u", 8, 23),
                                                 (sequence "P%u", 4, 15),
                                                 X19, X20, X21, X22, X23, X24,
                                                 X25, X26, X27, X28, LR, FP)>;

def CSR_AArch64_AAPCS_SwiftTail
    : CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X20, X22)>;

// Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
// 'this' and the pointer return value are both passed in X0 in these cases,
// this can be partially modelled by treating X0 as a callee-saved register;
// only the resulting RegMask is used; the SaveList is ignored
//
// (For generic ARM 64-bit ABI code, clang will not generate constructors or
// destructors with 'this' returns, so this RegMask will not be used in that
// case)
def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;

def CSR_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X21)>;

// The ELF stub used for TLS-descriptor access saves every feasible
// register. Only X0 and LR are clobbered.
def CSR_AArch64_TLS_ELF
    : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_AllRegs
    : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
                           (sequence "X%u", 0, 28), FP, LR, SP,
                           (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
                           (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_NoRegs : CalleeSavedRegs<(add)>;

def CSR_AArch64_RT_MostRegs : CalleeSavedRegs<(add CSR_AArch64_AAPCS,
                                                   (sequence "X%u", 9, 15))>;

def CSR_AArch64_StackProbe_Windows
    : CalleeSavedRegs<(add (sequence "X%u", 0, 15),
                           (sequence "X%u", 18, 28), FP, SP,
                           (sequence "Q%u", 0, 31))>;

// Darwin variants of AAPCS.
// Darwin puts the frame-record at the top of the callee-save area.
def CSR_Darwin_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
                                                    X23, X24, X25, X26, X27, X28,
                                                    D8, D9, D10, D11,
                                                    D12, D13, D14, D15)>;

def CSR_Darwin_AArch64_AAVPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21,
                                                     X22, X23, X24, X25, X26, X27,
                                                     X28, (sequence "Q%u", 8, 23))>;
def CSR_Darwin_AArch64_AAPCS_ThisReturn
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS, X0)>;

def CSR_Darwin_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_AAPCS, X21)>;

def CSR_Darwin_AArch64_AAPCS_SwiftTail
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_AAPCS, X20, X22)>;

// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. X16 and X17 are used on the
// fast path for calculation, but all other registers except X0
// (argument/return) and LR (it is a call, after all) are preserved.
def CSR_Darwin_AArch64_TLS
    : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
                           FP,
                           (sequence "Q%u", 0, 31))>;

// We can only handle a register pair with adjacent registers; the register
// pair should also belong to the same class. Since the access function on the
// fast path calls a function that follows CSR_Darwin_AArch64_TLS,
// CSR_Darwin_AArch64_CXX_TLS should be a subset of CSR_Darwin_AArch64_TLS.
def CSR_Darwin_AArch64_CXX_TLS
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS,
                           (sub (sequence "X%u", 1, 28), X9, X15, X16, X17, X18, X19),
                           (sequence "D%u", 0, 31))>;

// CSRs that are handled by prologue, epilogue.
def CSR_Darwin_AArch64_CXX_TLS_PE
    : CalleeSavedRegs<(add LR, FP)>;

// CSRs that are handled explicitly via copies.
def CSR_Darwin_AArch64_CXX_TLS_ViaCopy
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_CXX_TLS, LR, FP)>;

def CSR_Darwin_AArch64_RT_MostRegs
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS, (sequence "X%u", 9, 15))>;

// Variants of the standard calling conventions for shadow call stack.
// These all preserve X18 in addition to any other registers.
def CSR_AArch64_NoRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_NoRegs, X18)>;
def CSR_AArch64_AllRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AllRegs, X18)>;
def CSR_AArch64_AAPCS_SwiftError_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS_SwiftError, X18)>;
def CSR_AArch64_RT_MostRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_RT_MostRegs, X18)>;
def CSR_AArch64_AAVPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAVPCS, X18)>;
def CSR_AArch64_SVE_AAPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_SVE_AAPCS, X18)>;
def CSR_AArch64_AAPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X18)>;
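
// Note: each CalleeSavedRegs definition above is emitted by TableGen as a
// <Name>_SaveList array and a <Name>_RegMask; AArch64RegisterInfo selects
// among them based on the function's calling convention and target options.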