//===- PPCCallingConv.td - Calling Conventions for PowerPC -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the PowerPC 32- and 64-bit
// architectures.
//
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
 : CCIf<!strconcat("static_cast<const PPCSubtarget&>"
                        "(State.getMachineFunction().getSubtarget()).",
                   F),
        A>;

/// CCIfNotSubtarget - Match if the current subtarget does NOT have feature F.
class CCIfNotSubtarget<string F, CCAction A>
 : CCIf<!strconcat("!static_cast<const PPCSubtarget&>"
                        "(State.getMachineFunction().getSubtarget()).",
                   F),
        A>;

/// CCIfOrigArgWasNotPPCF128 - Match if the original (pre-legalization) type of
/// this argument was not ppcf128, as recorded in PPCCCState.
class CCIfOrigArgWasNotPPCF128<CCAction A>
 : CCIf<"!static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
        A>;

/// CCIfOrigArgWasPPCF128 - Match if the original (pre-legalization) type of
/// this argument was ppcf128.
class CCIfOrigArgWasPPCF128<CCAction A>
 : CCIf<"static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
        A>;

//===----------------------------------------------------------------------===//
// Return Value Calling Convention
//===----------------------------------------------------------------------===//

// PPC64 AnyReg return-value convention. No explicit register is specified for
// the return-value. The register allocator is allowed and expected to choose
// any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the PPC C calling convention.
def RetCC_PPC64_AnyReg : CallingConv<[
  CCCustom<"CC_PPC_AnyReg_Error">
]>;

// Return-value convention for PowerPC coldcc.
let Entry = 1 in
def RetCC_PPC_Cold : CallingConv<[
  // Use the same return registers as RetCC_PPC, but limited to only
  // one return value. The remaining return values will be saved to
  // the stack.
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,

  CCIfType<[i32], CCAssignToReg<[R3]>>,
  CCIfType<[i64], CCAssignToReg<[X3]>>,
  CCIfType<[i128], CCAssignToReg<[X3]>>,

  CCIfType<[f32], CCAssignToReg<[F1]>>,
  CCIfType<[f64], CCAssignToReg<[F1]>>,
  CCIfType<[f128], CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2]>>>,

  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2]>>>
]>;

// Return-value convention for PowerPC
let Entry = 1 in
def RetCC_PPC : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  // On PPC64, integer return values are always promoted to i64
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,

  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,

  // Floating point types returned as "direct" go into F1 .. F8; note that
  // only the ELFv2 ABI fully utilizes all these registers.
  CCIfNotSubtarget<"hasSPE()",
       CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfNotSubtarget<"hasSPE()",
       CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  // With SPE, floats live in GPRs; f64 needs a custom handler to split the
  // value across a register pair.
  CCIfSubtarget<"hasSPE()",
       CCIfType<[f32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,
  CCIfSubtarget<"hasSPE()",
       CCIfType<[f64], CCCustom<"CC_PPC32_SPE_RetF64">>>,

  // For P9, f128 are passed in vector registers.
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,

  // Vector types returned as "direct" go into V2 .. V9; note that only the
  // ELFv2 ABI fully utilizes all these registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
]>;

// No explicit register is specified for the AnyReg calling convention. The
// register allocator may assign the arguments to any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the PPC C calling convention.
def CC_PPC64_AnyReg : CallingConv<[
  CCCustom<"CC_PPC_AnyReg_Error">
]>;

// Note that we don't currently have calling conventions for 64-bit
// PowerPC, but handle all the complexities of the ABI in the lowering
// logic. FIXME: See if the logic can be simplified with use of CCs.
// This may require some extensions to current table generation.

// Simple calling convention for 64-bit ELF PowerPC fast isel.
// Only handle ints and floats. All ints are promoted to i64.
// Vector types and quadword ints are not handled.
let Entry = 1 in
def CC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_PPC64_AnyReg>>,

  CCIfType<[i1],  CCPromoteToType<i64>>,
  CCIfType<[i8],  CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6, X7, X8, X9, X10]>>,
  CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>
]>;

// Simple return-value convention for 64-bit ELF PowerPC fast isel.
// All small ints are promoted to i64. Vector types, quadword ints,
// and multiple register returns are "supported" to avoid compile
// errors, but none are handled by the fast selector.
let Entry = 1 in
def RetCC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  CCIfType<[i1],   CCPromoteToType<i64>>,
  CCIfType<[i8],   CCPromoteToType<i64>>,
  CCIfType<[i16],  CCPromoteToType<i64>>,
  CCIfType<[i32],  CCPromoteToType<i64>>,
  CCIfType<[i64],  CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[f32],  CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f64],  CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
]>;

//===----------------------------------------------------------------------===//
// PowerPC System V Release 4 32-bit ABI
//===----------------------------------------------------------------------===//

def CC_PPC32_SVR4_Common : CallingConv<[
  CCIfType<[i1], CCPromoteToType<i32>>,

  // The ABI requires i64 to be passed in two adjacent registers with the first
  // register having an odd register number.
  CCIfType<[i32],
           CCIfSplit<CCIfSubtarget<"useSoftFloat()",
                     CCIfOrigArgWasNotPPCF128<
                       CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>>,

  CCIfType<[i32],
           CCIfSplit<CCIfNotSubtarget<"useSoftFloat()",
                       CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>,
  CCIfType<[f64],
           CCIfSubtarget<"hasSPE()",
                       CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>,
  CCIfSplit<CCIfSubtarget<"useSoftFloat()",
                          CCIfOrigArgWasPPCF128<CCCustom<
                            "CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128">>>>,

  // The 'nest' parameter, if any, is passed in R11.
  CCIfNest<CCAssignToReg<[R11]>>,

  // The first 8 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,

  // Make sure the i64 words from a long double are either both passed in
  // registers or both passed on the stack.
  CCIfType<[f64], CCIfSplit<CCCustom<"CC_PPC32_SVR4_Custom_AlignFPArgRegs">>>,

  // FP values are passed in F1 - F8.
  CCIfType<[f32, f64],
           CCIfNotSubtarget<"hasSPE()",
                            CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfType<[f64],
           CCIfSubtarget<"hasSPE()",
                         CCCustom<"CC_PPC32_SPE_CustomSplitFP64">>>,
  CCIfType<[f32],
           CCIfSubtarget<"hasSPE()",
                         CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,

  // Split arguments have an alignment of 8 bytes on the stack.
  CCIfType<[i32], CCIfSplit<CCAssignToStack<4, 8>>>,

  CCIfType<[i32], CCAssignToStack<4, 4>>,

  CCIfType<[f32], CCAssignToStack<4, 4>>,
  CCIfType<[f64], CCAssignToStack<8, 8>>,

  // Vectors and float128 get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>,
  CCIfType<[f128], CCIfSubtarget<"hasAltivec()", CCAssignToStack<16, 16>>>
]>;

// This calling convention puts vector arguments always on the stack. It is used
// to assign vector arguments which belong to the variable portion of the
// parameter list of a variable argument function.
let Entry = 1 in
def CC_PPC32_SVR4_VarArg : CallingConv<[
  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;

// In contrast to CC_PPC32_SVR4_VarArg, this calling convention first tries to
// put vector arguments in vector registers before putting them on the stack.
let Entry = 1 in
def CC_PPC32_SVR4 : CallingConv<[
  // The first 12 Vector arguments are passed in AltiVec registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                        V8, V9, V10, V11, V12, V13]>>>,

  // Float128 types treated as vector arguments.
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                        V8, V9, V10, V11, V12, V13]>>>,

  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;

// Helper "calling convention" to handle aggregate by value arguments.
// Aggregate by value arguments are always placed in the local variable space
// of the caller. This calling convention is only used to assign those stack
// offsets in the callers stack frame.
//
// Still, the address of the aggregate copy in the callers stack frame is passed
// in a GPR (or in the parameter list area if all GPRs are allocated) from the
// caller to the callee. The location for the address argument is assigned by
// the CC_PPC32_SVR4 calling convention.
//
// The only purpose of CC_PPC32_SVR4_Custom_Dummy is to skip arguments which are
// not passed by value.

let Entry = 1 in
def CC_PPC32_SVR4_ByVal : CallingConv<[
  CCIfByVal<CCPassByVal<4, 4>>,

  CCCustom<"CC_PPC32_SVR4_Custom_Dummy">
]>;

// Callee-saved AltiVec vector registers (V20-V31).
def CSR_Altivec : CalleeSavedRegs<(add V20, V21, V22, V23, V24, V25, V26, V27,
                                       V28, V29, V30, V31)>;

// SPE does not use FPRs, so break out the common register set as base.
// GPR/CR portion shared by the FPR-based (CSR_SVR432) and SPE-based
// (CSR_SVR432_SPE) 32-bit SVR4 callee-saved sets.
def CSR_SVR432_COMM : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20,
                                           R21, R22, R23, R24, R25, R26, R27,
                                           R28, R29, R30, R31, CR2, CR3, CR4
                                      )>;

def CSR_SVR432 : CalleeSavedRegs<(add CSR_SVR432_COMM, F14, F15, F16, F17, F18,
                                      F19, F20, F21, F22, F23, F24, F25, F26,
                                      F27, F28, F29, F30, F31
                                 )>;

def CSR_SPE : CalleeSavedRegs<(add S14, S15, S16, S17, S18, S19, S20, S21, S22,
                                   S23, S24, S25, S26, S27, S28, S29, S30
                              )>;

// Variant of CSR_SPE that excludes S30 and S31.
def CSR_SPE_NO_S30_31 : CalleeSavedRegs<(add S14, S15, S16, S17, S18, S19, S20,
                                             S21, S22, S23, S24, S25, S26, S27,
                                             S28, S29
                                        )>;

def CSR_SVR432_Altivec : CalleeSavedRegs<(add CSR_SVR432, CSR_Altivec)>;

def CSR_SVR432_SPE : CalleeSavedRegs<(add CSR_SVR432_COMM, CSR_SPE)>;

def CSR_SVR432_SPE_NO_S30_31 : CalleeSavedRegs<(add CSR_SVR432_COMM,
                                                    CSR_SPE_NO_S30_31)>;

def CSR_AIX32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
                                     R21, R22, R23, R24, R25, R26, R27, R28,
                                     R29, R30, R31, F14, F15, F16, F17, F18,
                                     F19, F20, F21, F22, F23, F24, F25, F26,
                                     F27, F28, F29, F30, F31, CR2, CR3, CR4
                                )>;

def CSR_AIX32_Altivec : CalleeSavedRegs<(add CSR_AIX32, CSR_Altivec)>;

// Common CalleeSavedRegs for SVR4 and AIX.
def CSR_PPC64 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
                                     X21, X22, X23, X24, X25, X26, X27, X28,
                                     X29, X30, X31, F14, F15, F16, F17, F18,
                                     F19, F20, F21, F22, F23, F24, F25, F26,
                                     F27, F28, F29, F30, F31, CR2, CR3, CR4
                                )>;

def CSR_PPC64_Altivec : CalleeSavedRegs<(add CSR_PPC64, CSR_Altivec)>;

def CSR_PPC64_R2 : CalleeSavedRegs<(add CSR_PPC64, X2)>;

def CSR_PPC64_R2_Altivec : CalleeSavedRegs<(add CSR_PPC64_Altivec, X2)>;

// Empty callee-saved set (nothing is preserved across the call).
def CSR_NoRegs : CalleeSavedRegs<(add)>;

// coldcc calling convention marks most registers as non-volatile.
// Do not include r1 since the stack pointer is never considered a CSR.
// Do not include r2, since it is the TOC register and is added depending
// on whether or not the function uses the TOC and is a non-leaf.
// Do not include r0,r11,r13 as they are optional in functional linkage
// and value may be altered by inter-library calls.
// Do not include r12 as it is used as a scratch register.
// Do not include return registers r3, f1, v2.
def CSR_SVR32_ColdCC_Common : CalleeSavedRegs<(add (sequence "R%u", 4, 10),
                                                   (sequence "R%u", 14, 31),
                                                   (sequence "CR%u", 0, 7))>;

def CSR_SVR32_ColdCC : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Common,
                                            F0, (sequence "F%u", 2, 31))>;

// V2 is excluded since it is the vector return register.
def CSR_SVR32_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR32_ColdCC,
                                                    (sequence "V%u", 0, 1),
                                                    (sequence "V%u", 3, 31))>;

def CSR_SVR32_ColdCC_SPE : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Common,
                                                (sequence "S%u", 4, 10),
                                                (sequence "S%u", 14, 31))>;

def CSR_SVR64_ColdCC : CalleeSavedRegs<(add (sequence "X%u", 4, 10),
                                            (sequence "X%u", 14, 31),
                                            F0, (sequence "F%u", 2, 31),
                                            (sequence "CR%u", 0, 7))>;

def CSR_SVR64_ColdCC_R2: CalleeSavedRegs<(add CSR_SVR64_ColdCC, X2)>;

def CSR_SVR64_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC,
                                                    (sequence "V%u", 0, 1),
                                                    (sequence "V%u", 3, 31))>;

def CSR_SVR64_ColdCC_R2_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC_Altivec,
                                                       X2)>;

// "AllRegs" sets used by the AnyReg convention: nearly every allocatable
// register is treated as callee-saved.
def CSR_64_AllRegs: CalleeSavedRegs<(add X0, (sequence "X%u", 3, 10),
                                         (sequence "X%u", 14, 31),
                                         (sequence "F%u", 0, 31),
                                         (sequence "CR%u", 0, 7))>;

def CSR_64_AllRegs_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
                                                  (sequence "V%u", 0, 31))>;

def CSR_64_AllRegs_AIX_Dflt_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
                                                           (sequence "V%u",
                                                                     0, 19))>;

def CSR_64_AllRegs_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
                                              (sequence "VSL%u", 0, 31))>;

def CSR_64_AllRegs_AIX_Dflt_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
                                                       (sequence "VSL%u",
                                                                 0, 19))>;

// All 32 VSX register pairs.
def CSR_ALL_VSRP : CalleeSavedRegs<(sequence "VSRp%u", 0, 31)>;

// Callee-saved VSX register pairs (VSRp26-VSRp31).
def CSR_VSRP :
  CalleeSavedRegs<(add VSRp26, VSRp27, VSRp28, VSRp29, VSRp30, VSRp31)>;

def CSR_SVR432_VSRP : CalleeSavedRegs<(add CSR_SVR432_Altivec, CSR_VSRP)>;

def CSR_SVR464_VSRP : CalleeSavedRegs<(add CSR_PPC64_Altivec, CSR_VSRP)>;

def CSR_SVR464_R2_VSRP : CalleeSavedRegs<(add CSR_SVR464_VSRP, X2)>;

def CSR_SVR32_ColdCC_VSRP : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Altivec,
                                                 (sub CSR_ALL_VSRP, VSRp17))>;

def CSR_SVR64_ColdCC_VSRP : CalleeSavedRegs<(add CSR_SVR64_ColdCC,
                                                 (sub CSR_ALL_VSRP, VSRp17))>;

def CSR_SVR64_ColdCC_R2_VSRP : CalleeSavedRegs<(add CSR_SVR64_ColdCC_VSRP, X2)>;

def CSR_64_AllRegs_VSRP :
  CalleeSavedRegs<(add CSR_64_AllRegs_VSX, CSR_ALL_VSRP)>;

def CSR_AIX64_VSRP : CalleeSavedRegs<(add CSR_PPC64_Altivec, CSR_VSRP)>;

def CSR_AIX64_R2_VSRP : CalleeSavedRegs<(add CSR_AIX64_VSRP, X2)>;

def CSR_AIX32_VSRP : CalleeSavedRegs<(add CSR_AIX32_Altivec, CSR_VSRP)>;