//=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for AArch64 architecture.
//
//===----------------------------------------------------------------------===//

/// CCIfBigEndian - Match only if we're in big endian mode.
class CCIfBigEndian<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().isBigEndian()", A>;

/// CCIfILP32 - Match only if pointers are 4 bytes (the ILP32 / arm64_32 ABI).
class CCIfILP32<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().getPointerSize() == 4", A>;


//===----------------------------------------------------------------------===//
// ARM AAPCS64 Calling Convention
//===----------------------------------------------------------------------===//

// Argument-passing rules shared between the standard AAPCS and the Win64 PCS.
// NOTE: rule order is significant; the first matching action is applied.
defvar AArch64_Common = [
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v4bf16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v8bf16, v16i8],
                         CCBitConvertToType<f128>>>,

  // In AAPCS, an SRet is passed in X8, not X0 like a normal pointer parameter.
  // However, on windows, in some circumstances, the SRet is passed in X0 or X1
  // instead. The presence of the inreg attribute indicates that SRet is
  // passed in the alternative register (X0 or X1), not X8:
  // - X0 for non-instance methods.
  // - X1 for instance methods.

  // The "sret" attribute identifies indirect returns.
  // The "inreg" attribute identifies non-aggregate types.
  // The position of the "sret" attribute identifies instance/non-instance
  // methods.
  // "sret" on argument 0 means non-instance methods.
  // "sret" on argument 1 means instance methods.

  CCIfInReg<CCIfType<[i64],
            CCIfSRet<CCIfType<[i64], CCAssignToReg<[X0, X1]>>>>>,

  CCIfSRet<CCIfType<[i64], CCAssignToReg<[X8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[X20]>>>,

  // A SwiftError is passed in X21.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // Pass SwiftAsync in an otherwise callee saved register so that it will be
  // preserved for normal function calls.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[X22]>>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // SVE data vectors go in Z0-Z7; once those are exhausted they are passed
  // indirectly (by pointer in an i64).
  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,
  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCPassIndirect<i64>>,

  // SVE predicates (and svcount) go in P0-P3, then indirectly.
  CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
           CCAssignToReg<[P0, P1, P2, P3]>>,
  CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
           CCPassIndirect<i64>>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  // i128 is split to two i64s, we can't fit half to register X7.
  CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
                                                    [X0, X1, X3, X5]>>>,

  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[bf16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  CCIfType<[i1, i8, i16, f16, bf16], CCAssignToStack<8, 8>>,
  CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
];

let Entry = 1 in
def CC_AArch64_AAPCS : CallingConv<!listconcat(
  // The 'nest' parameter, if any, is passed in X18.
  // Darwin and Windows use X18 as the platform register and hence 'nest' isn't
  // currently supported there.
  [CCIfNest<CCAssignToReg<[X18]>>],
  AArch64_Common
)>;

let Entry = 1 in
def RetCC_AArch64_AAPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v4bf16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v8bf16, v16i8],
                         CCBitConvertToType<f128>>>,

  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[bf16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,

  CCIfType<[nxv1i1, nxv2i1, nxv4i1, nxv8i1, nxv16i1, aarch64svcount],
           CCAssignToReg<[P0, P1, P2, P3]>>
]>;

// Win64 PCS is the common rule set without the 'nest'-in-X18 rule (X18 is the
// platform register on Windows).
let Entry = 1 in
def CC_AArch64_Win64PCS : CallingConv<AArch64_Common>;

// Vararg functions on windows pass floats in integer registers
let Entry = 1 in
def CC_AArch64_Win64_VarArg : CallingConv<[
  CCIfType<[f16, bf16], CCBitConvertToType<i16>>,
  CCIfType<[f32], CCBitConvertToType<i32>>,
  CCIfType<[f64], CCBitConvertToType<i64>>,
  CCDelegateTo<CC_AArch64_Win64PCS>
]>;

// Vararg functions on Arm64EC ABI use a different convention, using
// a stack layout compatible with the x64 calling convention.
let Entry = 1 in
def CC_AArch64_Arm64EC_VarArg : CallingConv<[
  // Convert small floating-point values to integer.
  CCIfType<[f16, bf16], CCBitConvertToType<i16>>,
  CCIfType<[f32], CCBitConvertToType<i32>>,
  CCIfType<[f64, v1f64, v1i64, v2f32, v2i32, v4i16, v4f16, v4bf16, v8i8, iPTR],
           CCBitConvertToType<i64>>,

  // Larger floating-point/vector values are passed indirectly.
  CCIfType<[f128, v2f64, v2i64, v4i32, v4f32, v8i16, v8f16, v8bf16, v16i8],
           CCPassIndirect<i64>>,
  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
            nxv2bf16, nxv4bf16, nxv8bf16, nxv2f32, nxv4f32, nxv2f64],
           CCPassIndirect<i64>>,
  CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1],
           CCPassIndirect<i64>>,

  // Handle SRet. See comment in CC_AArch64_AAPCS.
  CCIfInReg<CCIfType<[i64],
            CCIfSRet<CCIfType<[i64], CCAssignToReg<[X0, X1]>>>>>,
  CCIfSRet<CCIfType<[i64], CCAssignToReg<[X8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit. (Shouldn't normally come up; the Microsoft ABI doesn't
  // use byval.)
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote small integers to i32
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  // Pass first four arguments in x0-x3.
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3]>>,
  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3]>>,

  // Put remaining arguments on stack.
  CCIfType<[i32, i64], CCAssignToStack<8, 8>>,
]>;

// Arm64EC thunks use a calling convention that's precisely the x64 calling
// convention, except that the registers have different names, and the callee
// address is passed in X9.
let Entry = 1 in
def CC_AArch64_Arm64EC_Thunk : CallingConv<[
  // ARM64EC-specific: the InReg attribute can be used to access the x64 sp
  // passed into entry thunks in x4 from the IR.
  CCIfInReg<CCIfType<[i64], CCAssignToReg<[X4]>>>,

  // Byval aggregates are passed by pointer
  CCIfByVal<CCPassIndirect<i64>>,

  // ARM64EC-specific: promote small integers to i32. (x86 only promotes i1,
  // but that would confuse ARM64 lowering code.)
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10 (X4).
  CCIfNest<CCAssignToReg<[X4]>>,

  // A SwiftError is passed in R12 (X19).
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X19]>>>,

  // Pass SwiftSelf in R13 (X20).
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[X20]>>>,

  // Pass SwiftAsync in an otherwise callee saved register so that calls to
  // normal functions don't need to save it somewhere.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // The 'CFGuardTarget' parameter, if any, is passed in RAX (R8).
  CCIfCFGuardTarget<CCAssignToReg<[X8]>>,

  // 128 bit vectors are passed by pointer
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCPassIndirect<i64>>,

  // 256 bit vectors are passed by pointer
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64], CCPassIndirect<i64>>,

  // 512 bit vectors are passed by pointer
  CCIfType<[v64i8, v32i16, v16i32, v32f16, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,

  // Long doubles are passed by pointer
  CCIfType<[f80], CCPassIndirect<i64>>,

  // The first 4 MMX vector arguments are passed in GPRs.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // The first 4 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f16],
           CCAssignToRegWithShadow<[H0, H1, H2, H3],
                                   [X0, X1, X2, X3]>>,
  CCIfType<[f32],
           CCAssignToRegWithShadow<[S0, S1, S2, S3],
                                   [X0, X1, X2, X3]>>,
  CCIfType<[f64],
           CCAssignToRegWithShadow<[D0, D1, D2, D3],
                                   [X0, X1, X2, X3]>>,

  // The first 4 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3],
                                          [Q0, Q1, Q2, Q3]>>,

  // Arm64EC thunks: the first argument is always a pointer to the destination
  // address, stored in x9.
  CCIfType<[i64], CCAssignToReg<[X9]>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3],
                                          [Q0, Q1, Q2, Q3]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i8, i16, i32, i64, f16, f32, f64], CCAssignToStack<8, 8>>
]>;

// The native side of ARM64EC thunks
let Entry = 1 in
def CC_AArch64_Arm64EC_Thunk_Native : CallingConv<[
  CCIfType<[i64], CCAssignToReg<[X9]>>,
  CCDelegateTo<CC_AArch64_AAPCS>
]>;

let Entry = 1 in
def RetCC_AArch64_Arm64EC_Thunk : CallingConv<[
  // The X86-Win64 calling convention always returns __m64 values in RAX.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // Otherwise, everything is the same as 'normal' X86-64 C CC.

  // The X86-64 calling convention always returns FP values in XMM0.
  CCIfType<[f16], CCAssignToReg<[H0, H1]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1]>>,
  CCIfType<[f128], CCAssignToReg<[Q0, Q1]>>,

  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X19]>>>,

  // Scalar values are returned in AX first, then DX. For i8, the ABI
  // requires the values to be in AL and AH, however this code uses AL and DL
  // instead. This is because using AH for the second register conflicts with
  // the way LLVM does multiple return values -- a return of {i16,i8} would end
  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
  // for functions that return two i8 values are currently expected to pack the
  // values into an i16 (which uses AX, and thus AL:AH).
  //
  // For code that doesn't care about the ABI, we allow returning more than two
  // integer values in registers.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W8, W1, W0]>>,
  CCIfType<[i64], CCAssignToReg<[X8, X1, X0]>>,

  // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
  // can only be used by ABI non-compliant code. If the target doesn't have XMM
  // registers, it won't have vector types.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
           CCAssignToReg<[Q0, Q1, Q2, Q3]>>
]>;

// Windows Control Flow Guard checks take a single argument (the target function
// address) and have no return value.
let Entry = 1 in
def CC_AArch64_Win64_CFGuard_Check : CallingConv<[
  CCIfType<[i64], CCAssignToReg<[X15]>>
]>;

let Entry = 1 in
def CC_AArch64_Arm64EC_CFGuard_Check : CallingConv<[
  CCIfType<[i64], CCAssignToReg<[X11, X10]>>
]>;

let Entry = 1 in
def RetCC_AArch64_Arm64EC_CFGuard_Check : CallingConv<[
  CCIfType<[i64], CCAssignToReg<[X11]>>
]>;


// Darwin uses a calling convention which differs in only two ways
// from the standard one at this level:
//   + i128s (i.e. split i64s) don't need even registers.
//   + Stack slots are sized as needed rather than being at least 64-bit.
let Entry = 1 in
def CC_AArch64_DarwinPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToReg<[X8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToReg<[X20]>>>,

  // A SwiftError is passed in X21.
  CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[X21]>>>,

  // Pass SwiftAsync in an otherwise callee saved register so that it will be
  // preserved for normal function calls.
  CCIfSwiftAsync<CCIfType<[i64], CCAssignToReg<[X22]>>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToReg<[W0, W1, W2, W3, W4, W5, W6, W7]>>,
  // i128 is split to two i64s, we can't fit half to register X7.
  // (Unlike AAPCS, Darwin does not require the pair to start in an even
  // register, hence the plain register list here.)
  CCIfType<[i64],
           CCIfSplit<CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6]>>>,
  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToReg<[X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[f16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[bf16], CCAssignToReg<[H0, H1, H2, H3, H4, H5, H6, H7]>>,
  CCIfType<[f32], CCAssignToReg<[S0, S1, S2, S3, S4, S5, S6, S7]>>,
  CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  // Darwin sizes stack slots as needed, not as a minimum of 8 bytes.
  CCIf<"ValVT == MVT::i1 || ValVT == MVT::i8", CCAssignToStack<1, 1>>,
  CCIf<"ValVT == MVT::i16 || ValVT == MVT::f16 || ValVT == MVT::bf16",
       CCAssignToStack<2, 2>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Re-demote pointers to 32-bits so we don't end up storing 64-bit
  // values and clobbering neighbouring stack locations. Not very pretty.
  CCIfPtr<CCIfILP32<CCTruncToType<i32>>>,
  CCIfPtr<CCIfILP32<CCAssignToStack<4, 4>>>,

  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;

let Entry = 1 in
def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Stack_Block">>,

  // Handle all scalar types as either i64 or f64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  CCIfType<[f16, bf16, f32], CCPromoteToType<f64>>,

  // Everything is on the stack.
  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;

// In the ILP32 world, the minimum stack slot size is 4 bytes. Otherwise the
// same as the normal Darwin VarArgs handling.
let Entry = 1 in
def CC_AArch64_DarwinPCS_ILP32_VarArg : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // Handle all scalar types as either i32 or f32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,
  CCIfType<[f16, bf16], CCPromoteToType<f32>>,

  // Everything is on the stack.
  // i128 is split to two i64s, and its stack alignment is 16 bytes.
  CCIfPtr<CCIfILP32<CCTruncToType<i32>>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16, v4bf16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16, v8bf16],
           CCAssignToStack<16, 16>>
]>;

//===----------------------------------------------------------------------===//
// ARM64 Calling Convention for GHC
//===----------------------------------------------------------------------===//

// This calling convention is specific to the Glasgow Haskell Compiler.
// The only documentation is the GHC source code, specifically the C header
// file:
//
//     https://github.com/ghc/ghc/blob/master/rts/include/stg/MachRegs.h
//
// which defines the registers for the Spineless Tagless G-Machine (STG) that
// GHC uses to implement lazy evaluation. The generic STG machine has a set of
// registers which are mapped to appropriate set of architecture specific
// registers for each CPU architecture.
//
// The STG Machine is documented here:
//
//     https://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/GeneratedCode
//
// The AArch64 register mapping is defined in the following header file:
//
//     https://github.com/ghc/ghc/blob/master/rts/include/stg/MachRegs/arm64.h
//

let Entry = 1 in
def CC_AArch64_GHC : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,

  // Handle all vector types as either f64 or v2f64.
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, f128], CCBitConvertToType<v2f64>>,

  CCIfType<[v2f64], CCAssignToReg<[Q4, Q5]>>,
  CCIfType<[f32], CCAssignToReg<[S8, S9, S10, S11]>>,
  CCIfType<[f64], CCAssignToReg<[D12, D13, D14, D15]>>,

  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64], CCAssignToReg<[X19, X20, X21, X22, X23, X24, X25, X26, X27, X28]>>
]>;

// The order of the callee-saves in this file is important, because the
// FrameLowering code will use this order to determine the layout the
// callee-save area in the stack frame. As can be observed below, Darwin
// requires the frame-record (LR, FP) to be at the top the callee-save area,
// whereas for other platforms they are at the bottom.

// FIXME: LR is only callee-saved in the sense that *we* preserve it and are
// presumably a callee to someone. External functions may not do so, but this
// is currently safe since BL has LR as an implicit-def and what happens after a
// tail call doesn't matter.
//
// It would be better to model its preservation semantics properly (create a
// vreg on entry, use it in RET & tail call generation; make that vreg def if we
// end up saving LR as part of a call frame). Watch this space...
def CSR_AArch64_AAPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                             X25, X26, X27, X28, LR, FP,
                                             D8, D9, D10, D11,
                                             D12, D13, D14, D15)>;

// A variant for treating X18 as callee saved, when interfacing with
// code that needs X18 to be preserved.
def CSR_AArch64_AAPCS_X18 : CalleeSavedRegs<(add X18, CSR_AArch64_AAPCS)>;

// Win64 has unwinding codes for an (FP,LR) pair, save_fplr and save_fplr_x.
// We put FP before LR, so that frame lowering logic generates (FP,LR) pairs,
// and not (LR,FP) pairs.
def CSR_Win_AArch64_AAPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                                 X25, X26, X27, X28, FP, LR,
                                                 D8, D9, D10, D11,
                                                 D12, D13, D14, D15)>;

def CSR_Win_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_Win_AArch64_AAPCS, X21)>;

def CSR_Win_AArch64_AAPCS_SwiftTail
    : CalleeSavedRegs<(sub CSR_Win_AArch64_AAPCS, X20, X22)>;

// The Control Flow Guard check call uses a custom calling convention that also
// preserves X0-X8 and Q0-Q7.
def CSR_Win_AArch64_CFGuard_Check : CalleeSavedRegs<(add CSR_Win_AArch64_AAPCS,
                                                        (sequence "X%u", 0, 8),
                                                        (sequence "Q%u", 0, 7))>;

// To match the x64 calling convention, Arm64EC thunks preserve q6-q15.
def CSR_Win_AArch64_Arm64EC_Thunk : CalleeSavedRegs<(add (sequence "Q%u", 6, 15),
                                                         X19, X20, X21, X22, X23, X24,
                                                         X25, X26, X27, X28, FP, LR)>;

// AArch64 PCS for vector functions (VPCS)
// must (additionally) preserve full Q8-Q23 registers
def CSR_AArch64_AAVPCS : CalleeSavedRegs<(add X19, X20, X21, X22, X23, X24,
                                              X25, X26, X27, X28, LR, FP,
                                              (sequence "Q%u", 8, 23))>;

// Functions taking SVE arguments or returning an SVE type
// must (additionally) preserve full Z8-Z23 and predicate registers P4-P15
def CSR_AArch64_SVE_AAPCS : CalleeSavedRegs<(add (sequence "Z%u", 8, 23),
                                                 (sequence "P%u", 4, 15),
                                                 X19, X20, X21, X22, X23, X24,
                                                 X25, X26, X27, X28, LR, FP)>;

// SME ABI support routines such as __arm_tpidr2_save/restore preserve most
// registers: all Z/P state plus X0-X13 and X19-X28.
def CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0
    : CalleeSavedRegs<(add (sequence "Z%u", 0, 31),
                           (sequence "P%u", 0, 15),
                           (sequence "X%u", 0, 13),
                           (sequence "X%u", 19, 28),
                           LR, FP)>;

// SME ABI support routines __arm_sme_state preserves most registers
// (X0/X1 are its result registers, so preservation starts at X2).
def CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2
    : CalleeSavedRegs<(add (sequence "Z%u", 0, 31),
                           (sequence "P%u", 0, 15),
                           (sequence "X%u", 2, 15),
                           (sequence "X%u", 19, 28),
                           LR, FP)>;

// The SMSTART/SMSTOP instructions preserve only GPR registers.
def CSR_AArch64_SMStartStop : CalleeSavedRegs<(add (sequence "X%u", 0, 28),
                                                   LR, FP)>;

def CSR_AArch64_AAPCS_SwiftTail
    : CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X20, X22)>;

// Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
// 'this' and the pointer return value are both passed in X0 in these cases,
// this can be partially modelled by treating X0 as a callee-saved register;
// only the resulting RegMask is used; the SaveList is ignored
//
// (For generic ARM 64-bit ABI code, clang will not generate constructors or
// destructors with 'this' returns, so this RegMask will not be used in that
// case)
def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;

def CSR_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X21)>;

// The ELF stub used for TLS-descriptor access saves every feasible
// register. Only X0 and LR are clobbered.
def CSR_AArch64_TLS_ELF
    : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_AllRegs
    : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
                           (sequence "X%u", 0, 28), FP, LR, SP,
                           (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
                           (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_NoRegs : CalleeSavedRegs<(add)>;

// preserve_mostcc: AAPCS plus the temporaries X9-X15.
def CSR_AArch64_RT_MostRegs : CalleeSavedRegs<(add CSR_AArch64_AAPCS,
                                                   (sequence "X%u", 9, 15))>;

// preserve_allcc: additionally keep the full Q8-Q31 vector state.
def CSR_AArch64_RT_AllRegs : CalleeSavedRegs<(add CSR_AArch64_RT_MostRegs,
                                                  (sequence "Q%u", 8, 31))>;

def CSR_AArch64_StackProbe_Windows
    : CalleeSavedRegs<(add (sequence "X%u", 0, 15),
                           (sequence "X%u", 18, 28), FP, SP,
                           (sequence "Q%u", 0, 31))>;

// Darwin variants of AAPCS.
// Darwin puts the frame-record at the top of the callee-save area.
def CSR_Darwin_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
                                                    X23, X24, X25, X26, X27, X28,
                                                    D8, D9, D10, D11,
                                                    D12, D13, D14, D15)>;

def CSR_Darwin_AArch64_AAVPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21,
                                                     X22, X23, X24, X25, X26, X27,
                                                     X28, (sequence "Q%u", 8, 23))>;

// For Windows calling convention on a non-windows OS, where X18 is treated
// as reserved, back up X18 when entering non-windows code (marked with the
// Windows calling convention) and restore when returning regardless of
// whether the individual function uses it - it might call other functions
// that clobber it.
def CSR_Darwin_AArch64_AAPCS_Win64
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS, X18)>;

def CSR_Darwin_AArch64_AAPCS_ThisReturn
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS, X0)>;

def CSR_Darwin_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_AAPCS, X21)>;

def CSR_Darwin_AArch64_AAPCS_SwiftTail
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_AAPCS, X20, X22)>;

// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. x16 and x17 are used on the
// fast path for calculation, but other registers except X0 (argument/return)
// and LR (it is a call, after all) are preserved.
def CSR_Darwin_AArch64_TLS
    : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
                           FP,
                           (sequence "Q%u", 0, 31))>;

// We can only handle a register pair with adjacent registers, the register pair
// should belong to the same class as well. Since the access function on the
// fast path calls a function that follows CSR_Darwin_AArch64_TLS,
// CSR_Darwin_AArch64_CXX_TLS should be a subset of CSR_Darwin_AArch64_TLS.
def CSR_Darwin_AArch64_CXX_TLS
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS,
                           (sub (sequence "X%u", 1, 28), X9, X15, X16, X17, X18, X19),
                           (sequence "D%u", 0, 31))>;

// CSRs that are handled by prologue, epilogue.
def CSR_Darwin_AArch64_CXX_TLS_PE
    : CalleeSavedRegs<(add LR, FP)>;

// CSRs that are handled explicitly via copies.
def CSR_Darwin_AArch64_CXX_TLS_ViaCopy
    : CalleeSavedRegs<(sub CSR_Darwin_AArch64_CXX_TLS, LR, FP)>;

def CSR_Darwin_AArch64_RT_MostRegs
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_AAPCS, (sequence "X%u", 9, 15))>;

def CSR_Darwin_AArch64_RT_AllRegs
    : CalleeSavedRegs<(add CSR_Darwin_AArch64_RT_MostRegs, (sequence "Q%u", 8, 31))>;

// Variants of the standard calling conventions for shadow call stack.
// These all preserve x18 in addition to any other registers.
def CSR_AArch64_NoRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_NoRegs, X18)>;
def CSR_AArch64_AllRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AllRegs, X18)>;
def CSR_AArch64_AAPCS_SwiftError_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS_SwiftError, X18)>;
def CSR_AArch64_RT_MostRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_RT_MostRegs, X18)>;
def CSR_AArch64_RT_AllRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_RT_AllRegs, X18)>;
def CSR_AArch64_AAVPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAVPCS, X18)>;
def CSR_AArch64_SVE_AAPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_SVE_AAPCS, X18)>;
def CSR_AArch64_AAPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X18)>;