//=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the AArch64 architecture.
//
//===----------------------------------------------------------------------===//

/// CCIfAlign - Match if the original alignment of the argument equals Align.
class CCIfAlign<string Align, CCAction A> :
  CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;
/// CCIfBigEndian - Match only if we're in big endian mode.
class CCIfBigEndian<CCAction A> :
  CCIf<"State.getMachineFunction().getDataLayout().isBigEndian()", A>;

//===----------------------------------------------------------------------===//
// ARM AAPCS64 Calling Convention
//===----------------------------------------------------------------------===//

let Entry = 1 in
def CC_AArch64_AAPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so
  // that their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  // In AAPCS, an SRet is passed in X8, not X0 like a normal pointer parameter.
  // However, on Windows, in some circumstances, the SRet is passed in X0 or X1
  // instead. The presence of the inreg attribute indicates that SRet is
  // passed in the alternative register (X0 or X1), not X8:
  // - X0 for non-instance methods.
  // - X1 for instance methods.

  // The "sret" attribute identifies indirect returns.
  // The "inreg" attribute identifies non-aggregate types.
  // The position of the "sret" attribute identifies instance/non-instance
  // methods.
  // "sret" on argument 0 means non-instance methods.
  // "sret" on argument 1 means instance methods.
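  //
  // An illustrative example (assuming the usual convention that 'this' is the
  // first argument, passed in X0): a Windows C++ instance method returning its
  // result indirectly in this way receives 'this' in X0 and the hidden sret
  // pointer in X1, so that sret argument is also marked "inreg" and is matched
  // by the first rule below rather than the X8 rule.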

  CCIfInReg<CCIfType<[i64],
            CCIfSRet<CCIfType<[i64],
                     CCAssignToRegWithShadow<[X0, X1], [W0, W1]>>>>>,

  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // The 'nest' parameter, if any, is passed in X18.
  // Darwin uses X18 as the platform register and hence 'nest' isn't currently
  // supported there.
  CCIfNest<CCAssignToReg<[X18]>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToRegWithShadow<[X20], [W20]>>>,

  // A SwiftError is passed in X21.
  CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X21], [W21]>>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // An i128 is split into two i64s and, under AAPCS, must start at an
  // even-numbered register (see the Darwin note below), so the first half can
  // only go in X0, X2, X4 or X6; the second half can then never be left
  // without a register.
  CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
                                                    [X0, X1, X3, X5]>>>,

  // An i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
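  // (Illustration, not part of the ABI text: once W0-W7/X0-X7 are exhausted,
  // even an i8 argument occupies a full 8-byte, 8-byte-aligned stack slot
  // under the rules below; CC_AArch64_DarwinPCS further down instead sizes
  // stack slots as needed.)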
  CCIfType<[i1, i8, i16, f16], CCAssignToStack<8, 8>>,
  CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;

let Entry = 1 in
def RetCC_AArch64_AAPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X21], [W21]>>>,

  // Big endian vectors must be passed as if they were 1-element vectors so
  // that their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;

// Vararg functions on Windows pass floats in integer registers.
let Entry = 1 in
def CC_AArch64_Win64_VarArg : CallingConv<[
  CCIfType<[f16, f32], CCPromoteToType<f64>>,
  CCIfType<[f64], CCBitConvertToType<i64>>,
  CCDelegateTo<CC_AArch64_AAPCS>
]>;


// Darwin uses a calling convention which differs in only two ways
// from the standard one at this level:
// + i128s (i.e. split i64s) don't need even registers.
// + Stack slots are sized as needed rather than being at least 64-bit.
let Entry = 1 in
def CC_AArch64_DarwinPCS : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Pass SwiftSelf in a callee saved register.
  CCIfSwiftSelf<CCIfType<[i64], CCAssignToRegWithShadow<[X20], [W20]>>>,

  // A SwiftError is passed in X21.
  CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X21], [W21]>>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // An i128 is split into two i64s; the first half must not land in X7, since
  // the second half would then not fit in a register.
  CCIfType<[i64],
           CCIfSplit<CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6],
                                             [W0, W1, W2, W3, W4, W5, W6]>>>,
  // An i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more than will fit in registers, pass them on the stack instead.
  CCIf<"ValVT == MVT::i1 || ValVT == MVT::i8", CCAssignToStack<1, 1>>,
  CCIf<"ValVT == MVT::i16 || ValVT == MVT::f16", CCAssignToStack<2, 2>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;

let Entry = 1 in
def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Stack_Block">>,

  // Handle all scalar types as either i64 or f64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  CCIfType<[f16, f32], CCPromoteToType<f64>>,

  // Everything is on the stack.
  // An i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;

// The WebKit_JS calling convention only passes the first argument (the callee)
// in a register and passes the remaining arguments on the stack.
// We allow 32-bit stack slots so that WebKit can write partial values to the
// stack and define the other 32-bit quantity as undef.
let Entry = 1 in
def CC_AArch64_WebKit_JS : CallingConv<[
  // Handle i1, i8, i16, i32, and i64 passing in register X0 (W0).
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0], [X0]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0], [W0]>>,

  // Pass the remaining arguments on the stack instead.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;

let Entry = 1 in
def RetCC_AArch64_WebKit_JS : CallingConv<[
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;

//===----------------------------------------------------------------------===//
// ARM64 Calling Convention for GHC
//===----------------------------------------------------------------------===//

// This calling convention is specific to the Glasgow Haskell Compiler.
// The only documentation is the GHC source code, specifically the C header
// file:
//
//   https://github.com/ghc/ghc/blob/master/includes/stg/MachRegs.h
//
// which defines the registers for the Spineless Tagless G-Machine (STG) that
// GHC uses to implement lazy evaluation. The generic STG machine has a set of
// registers which are mapped to an appropriate set of architecture-specific
// registers for each CPU architecture.
//
// The STG Machine is documented here:
//
//   https://ghc.haskell.org/trac/ghc/wiki/Commentary/Compiler/GeneratedCode
//
// The AArch64 register mapping is under the heading "The ARMv8/AArch64 ABI
// register mapping".
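//
// For reference, the i64 rule in CC_AArch64_GHC below pairs the STG registers
// with machine registers in declaration order: Base=X19, Sp=X20, Hp=X21,
// R1=X22, R2=X23, R3=X24, R4=X25, R5=X26, R6=X27, SpLim=X28.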

let Entry = 1 in
def CC_AArch64_GHC : CallingConv<[
  CCIfType<[iPTR], CCBitConvertToType<i64>>,

  // Handle all vector types as either f64 or v2f64.
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, f128], CCBitConvertToType<v2f64>>,

  CCIfType<[v2f64], CCAssignToReg<[Q4, Q5]>>,
  CCIfType<[f32], CCAssignToReg<[S8, S9, S10, S11]>>,
  CCIfType<[f64], CCAssignToReg<[D12, D13, D14, D15]>>,

  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64], CCAssignToReg<[X19, X20, X21, X22, X23, X24, X25, X26, X27, X28]>>
]>;

// FIXME: LR is only callee-saved in the sense that *we* preserve it and are
// presumably a callee to someone. External functions may not do so, but this
// is currently safe since BL has LR as an implicit-def and what happens after a
// tail call doesn't matter.
//
// It would be better to model its preservation semantics properly (create a
// vreg on entry, use it in RET & tail call generation; make that vreg def if we
// end up saving LR as part of a call frame). Watch this space...
def CSR_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
                                             X23, X24, X25, X26, X27, X28,
                                             D8, D9, D10, D11,
                                             D12, D13, D14, D15)>;

// Win64 has unwinding codes for an (FP,LR) pair, save_fplr and save_fplr_x.
// We put FP before LR, so that frame lowering logic generates (FP,LR) pairs,
// and not (LR,FP) pairs.
def CSR_Win_AArch64_AAPCS : CalleeSavedRegs<(add FP, LR, X19, X20, X21, X22,
                                                 X23, X24, X25, X26, X27, X28,
                                                 D8, D9, D10, D11,
                                                 D12, D13, D14, D15)>;

// The AArch64 PCS for vector functions (VPCS) must additionally preserve the
// full Q8-Q23 registers.
def CSR_AArch64_AAVPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
                                              X23, X24, X25, X26, X27, X28,
                                              (sequence "Q%u", 8, 23))>;

// Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
// 'this' and the pointer return value are both passed in X0 in these cases,
// this can be partially modelled by treating X0 as a callee-saved register;
// only the resulting RegMask is used; the SaveList is ignored.
//
// (For generic ARM 64-bit ABI code, clang will not generate constructors or
// destructors with 'this' returns, so this RegMask will not be used in that
// case.)
def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;

def CSR_AArch64_AAPCS_SwiftError
    : CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X21)>;

// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. X16 and X17 are used on the
// fast path for calculation, but all other registers except X0 (argument/
// return) and LR (it is a call, after all) are preserved.
def CSR_AArch64_TLS_Darwin
    : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
                           FP,
                           (sequence "Q%u", 0, 31))>;

// We can only handle a register pair with adjacent registers; the register
// pair should also belong to the same class. Since the access function on the
// fast path calls a function that follows CSR_AArch64_TLS_Darwin,
// CSR_AArch64_CXX_TLS_Darwin should be a subset of CSR_AArch64_TLS_Darwin.
def CSR_AArch64_CXX_TLS_Darwin
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS,
                           (sub (sequence "X%u", 1, 28), X15, X16, X17, X18),
                           (sequence "D%u", 0, 31))>;

// CSRs that are handled by the prologue and epilogue.
def CSR_AArch64_CXX_TLS_Darwin_PE
    : CalleeSavedRegs<(add LR, FP)>;

// CSRs that are handled explicitly via copies.
def CSR_AArch64_CXX_TLS_Darwin_ViaCopy
    : CalleeSavedRegs<(sub CSR_AArch64_CXX_TLS_Darwin, LR, FP)>;

// The ELF stub used for TLS-descriptor access saves every feasible
// register. Only X0 and LR are clobbered.
def CSR_AArch64_TLS_ELF
    : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_AllRegs
    : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
                           (sequence "X%u", 0, 28), FP, LR, SP,
                           (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
                           (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_NoRegs : CalleeSavedRegs<(add)>;

def CSR_AArch64_RT_MostRegs : CalleeSavedRegs<(add CSR_AArch64_AAPCS,
                                                   (sequence "X%u", 9, 15))>;

def CSR_AArch64_StackProbe_Windows
    : CalleeSavedRegs<(add (sequence "X%u", 0, 15),
                           (sequence "X%u", 18, 28), FP, SP,
                           (sequence "Q%u", 0, 31))>;

// Variants of the standard calling conventions for shadow call stack.
// These all preserve X18 in addition to any other registers.
def CSR_AArch64_NoRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_NoRegs, X18)>;
def CSR_AArch64_AllRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AllRegs, X18)>;
def CSR_AArch64_CXX_TLS_Darwin_SCS
    : CalleeSavedRegs<(add CSR_AArch64_CXX_TLS_Darwin, X18)>;
def CSR_AArch64_AAPCS_SwiftError_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS_SwiftError, X18)>;
def CSR_AArch64_RT_MostRegs_SCS
    : CalleeSavedRegs<(add CSR_AArch64_RT_MostRegs, X18)>;
def CSR_AArch64_AAVPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAVPCS, X18)>;
def CSR_AArch64_AAPCS_SCS
    : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X18)>;