//===- PPCCallingConv.td - Calling Conventions for PowerPC -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the PowerPC 32- and 64-bit
// architectures.
//
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
    : CCIf<!strconcat("static_cast<const PPCSubtarget&>"
                      "(State.getMachineFunction().getSubtarget()).",
                      F),
           A>;
/// CCIfNotSubtarget - Match if the current subtarget does NOT have feature F.
class CCIfNotSubtarget<string F, CCAction A>
    : CCIf<!strconcat("!static_cast<const PPCSubtarget&>"
                      "(State.getMachineFunction().getSubtarget()).",
                      F),
           A>;
/// Match only if PPCCCState recorded that this argument was not originally a
/// ppcf128 value before legalization split it.
class CCIfOrigArgWasNotPPCF128<CCAction A>
    : CCIf<"!static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
           A>;
/// Match only if PPCCCState recorded that this argument was originally a
/// ppcf128 value before legalization split it.
class CCIfOrigArgWasPPCF128<CCAction A>
    : CCIf<"static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
           A>;

//===----------------------------------------------------------------------===//
// Return Value Calling Convention
//===----------------------------------------------------------------------===//

// PPC64 AnyReg return-value convention. No explicit register is specified for
// the return-value. The register allocator is allowed and expected to choose
// any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the PPC C calling convention.
def RetCC_PPC64_AnyReg : CallingConv<[
  CCCustom<"CC_PPC_AnyReg_Error">
]>;

// Return-value convention for PowerPC coldcc.
let Entry = 1 in
def RetCC_PPC_Cold : CallingConv<[
  // Use the same return registers as RetCC_PPC, but limited to only
  // one return value. The remaining return values will be saved to
  // the stack.
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,

  CCIfType<[i32], CCAssignToReg<[R3]>>,
  CCIfType<[i64], CCAssignToReg<[X3]>>,
  CCIfType<[i128], CCAssignToReg<[X3]>>,

  CCIfType<[f32], CCAssignToReg<[F1]>>,
  CCIfType<[f64], CCAssignToReg<[F1]>>,
  CCIfType<[f128], CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2]>>>,

  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2]>>>
]>;

// Return-value convention for PowerPC
let Entry = 1 in
def RetCC_PPC : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  // On PPC64, integer return values are always promoted to i64
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,

  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,

  // Floating point types returned as "direct" go into F1 .. F8; note that
  // only the ELFv2 ABI fully utilizes all these registers.
  // With SPE, floats live in GPRs instead (f64 needs a custom split handler).
  CCIfNotSubtarget<"hasSPE()",
                   CCIfType<[f32],
                            CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfNotSubtarget<"hasSPE()",
                   CCIfType<[f64],
                            CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfSubtarget<"hasSPE()",
                CCIfType<[f32],
                         CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,
  CCIfSubtarget<"hasSPE()",
                CCIfType<[f64], CCCustom<"CC_PPC32_SPE_RetF64">>>,

  // For P9, f128 are passed in vector registers.
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,

  // Vector types returned as "direct" go into V2 .. V9; note that only the
  // ELFv2 ABI fully utilizes all these registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
]>;

// No explicit register is specified for the AnyReg calling convention. The
// register allocator may assign the arguments to any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the PPC C calling convention.
def CC_PPC64_AnyReg : CallingConv<[
  CCCustom<"CC_PPC_AnyReg_Error">
]>;

// Calling Convention corresponding to the 64-bit PowerPC ELFv2 ABI.
// This calling convention currently only handles integers, floats and
// vectors within registers, as well as it handles the shadowing of GPRs
// when floating point and vector arguments are used.
// FIXME: This calling convention needs to be extended to handle all types and
// complexities of the ABI.
let Entry = 1 in
def CC_PPC64_ELF : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_PPC64_AnyReg>>,

  // Small integers are promoted to i64 before assignment.
  CCIfType<[i1], CCPromoteToType<i64>>,
  CCIfType<[i8], CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6, X7, X8, X9, X10]>>,

  // Handle fp types and shadow the corresponding registers as necessary.
  CCIfType<[f32, f64], CCIfNotVarArg<CCCustom<"CC_PPC64_ELF_Shadow_GPR_Regs">>>,
  CCIfType<[f32, f64],
           CCIfNotVarArg<CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8, F9, F10,
                                        F11, F12, F13]>>>,

  // f128 is handled through vector registers instead of fp registers.
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
                         CCIfNotVarArg<CCCustom<"CC_PPC64_ELF_Shadow_GPR_Regs">>>>,
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
                         CCIfNotVarArg<CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9, V10,
                                                      V11, V12, V13]>>>>,

  // Handle support for vector types, and shadow GPRs as necessary.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v1i128],
           CCIfSubtarget<"hasAltivec()",
                         CCIfNotVarArg<CCCustom<"CC_PPC64_ELF_Shadow_GPR_Regs">>>>,
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64, v1i128],
           CCIfSubtarget<"hasAltivec()",
                         CCIfNotVarArg<CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9, V10,
                                                      V11, V12, V13]>>>>,
]>;

// Simple calling convention for 64-bit ELF PowerPC fast isel.
// Only handle ints and floats. All ints are promoted to i64.
// Vector types and quadword ints are not handled.
let Entry = 1 in
def CC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_PPC64_AnyReg>>,

  CCIfType<[i1], CCPromoteToType<i64>>,
  CCIfType<[i8], CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6, X7, X8, X9, X10]>>,
  CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>
]>;

// Simple return-value convention for 64-bit ELF PowerPC fast isel.
// All small ints are promoted to i64. Vector types, quadword ints,
// and multiple register returns are "supported" to avoid compile
// errors, but none are handled by the fast selector.
let Entry = 1 in
def RetCC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  CCIfType<[i1], CCPromoteToType<i64>>,
  CCIfType<[i8], CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
]>;

//===----------------------------------------------------------------------===//
// PowerPC System V Release 4 32-bit ABI
//===----------------------------------------------------------------------===//

def CC_PPC32_SVR4_Common : CallingConv<[
  CCIfType<[i1], CCPromoteToType<i32>>,

  // The ABI requires i64 to be passed in two adjacent registers with the first
  // register having an odd register number.
  CCIfType<[i32],
           CCIfSplit<CCIfSubtarget<"useSoftFloat()",
                                   CCIfOrigArgWasNotPPCF128<
                                     CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>>,

  CCIfType<[i32],
           CCIfSplit<CCIfNotSubtarget<"useSoftFloat()",
                                      CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>,
  CCIfType<[f64],
           CCIfSubtarget<"hasSPE()",
                         CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>,
  CCIfSplit<CCIfSubtarget<"useSoftFloat()",
                          CCIfOrigArgWasPPCF128<CCCustom<
                            "CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128">>>>,

  // The 'nest' parameter, if any, is passed in R11.
  CCIfNest<CCAssignToReg<[R11]>>,

  // The first 8 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,

  // Make sure the i64 words from a long double are either both passed in
  // registers or both passed on the stack.
  CCIfType<[f64], CCIfSplit<CCCustom<"CC_PPC32_SVR4_Custom_AlignFPArgRegs">>>,

  // FP values are passed in F1 - F8.
  CCIfType<[f32, f64],
           CCIfNotSubtarget<"hasSPE()",
                            CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  // With SPE, floats are carried in GPRs (f64 via a custom split handler).
  CCIfType<[f64],
           CCIfSubtarget<"hasSPE()",
                         CCCustom<"CC_PPC32_SPE_CustomSplitFP64">>>,
  CCIfType<[f32],
           CCIfSubtarget<"hasSPE()",
                         CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,

  // Split arguments have an alignment of 8 bytes on the stack.
  CCIfType<[i32], CCIfSplit<CCAssignToStack<4, 8>>>,

  CCIfType<[i32], CCAssignToStack<4, 4>>,

  CCIfType<[f32], CCAssignToStack<4, 4>>,
  CCIfType<[f64], CCAssignToStack<8, 8>>,

  // Vectors and float128 get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>,
  CCIfType<[f128], CCIfSubtarget<"hasAltivec()", CCAssignToStack<16, 16>>>
]>;

// This calling convention puts vector arguments always on the stack. It is used
// to assign vector arguments which belong to the variable portion of the
// parameter list of a variable argument function.
let Entry = 1 in
def CC_PPC32_SVR4_VarArg : CallingConv<[
  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;

// In contrast to CC_PPC32_SVR4_VarArg, this calling convention first tries to
// put vector arguments in vector registers before putting them on the stack.
let Entry = 1 in
def CC_PPC32_SVR4 : CallingConv<[
  // The first 12 Vector arguments are passed in AltiVec registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                                        V8, V9, V10, V11, V12, V13]>>>,

  // Float128 types treated as vector arguments.
  CCIfType<[f128],
           CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                                        V8, V9, V10, V11, V12, V13]>>>,

  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;

// Helper "calling convention" to handle aggregate by value arguments.
// Aggregate by value arguments are always placed in the local variable space
// of the caller. This calling convention is only used to assign those stack
// offsets in the callers stack frame.
//
// Still, the address of the aggregate copy in the callers stack frame is passed
// in a GPR (or in the parameter list area if all GPRs are allocated) from the
// caller to the callee. The location for the address argument is assigned by
// the CC_PPC32_SVR4 calling convention.
//
// The only purpose of CC_PPC32_SVR4_Custom_Dummy is to skip arguments which are
// not passed by value.

let Entry = 1 in
def CC_PPC32_SVR4_ByVal : CallingConv<[
  CCIfByVal<CCPassByVal<4, 4>>,

  CCCustom<"CC_PPC32_SVR4_Custom_Dummy">
]>;

def CSR_Altivec : CalleeSavedRegs<(add V20, V21, V22, V23, V24, V25, V26, V27,
                                       V28, V29, V30, V31)>;

// SPE does not use FPRs, so break out the common register set as base.
def CSR_SVR432_COMM : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20,
                                           R21, R22, R23, R24, R25, R26, R27,
                                           R28, R29, R30, R31, CR2, CR3, CR4
                                      )>;
def CSR_SVR432 : CalleeSavedRegs<(add CSR_SVR432_COMM, F14, F15, F16, F17, F18,
                                      F19, F20, F21, F22, F23, F24, F25, F26,
                                      F27, F28, F29, F30, F31
                                 )>;
def CSR_SPE : CalleeSavedRegs<(add S14, S15, S16, S17, S18, S19, S20, S21, S22,
                                   S23, S24, S25, S26, S27, S28, S29, S30
                              )>;

// Same as CSR_SPE but without S30/S31 (S29 is the highest saved SPE register).
def CSR_SPE_NO_S30_31 : CalleeSavedRegs<(add S14, S15, S16, S17, S18, S19, S20, S21,
                                             S22, S23, S24, S25, S26, S27, S28, S29
                                        )>;

def CSR_SVR432_Altivec : CalleeSavedRegs<(add CSR_SVR432, CSR_Altivec)>;

def CSR_SVR432_SPE : CalleeSavedRegs<(add CSR_SVR432_COMM, CSR_SPE)>;

def CSR_SVR432_SPE_NO_S30_31 : CalleeSavedRegs<(add CSR_SVR432_COMM, CSR_SPE_NO_S30_31)>;

def CSR_AIX32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
                                     R21, R22, R23, R24, R25, R26, R27, R28,
                                     R29, R30, R31, F14, F15, F16, F17, F18,
                                     F19, F20, F21, F22, F23, F24, F25, F26,
                                     F27, F28, F29, F30, F31, CR2, CR3, CR4
                                )>;

def CSR_AIX32_Altivec : CalleeSavedRegs<(add CSR_AIX32, CSR_Altivec)>;

// Common CalleeSavedRegs for SVR4 and AIX.
def CSR_PPC64 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
                                     X21, X22, X23, X24, X25, X26, X27, X28,
                                     X29, X30, X31, F14, F15, F16, F17, F18,
                                     F19, F20, F21, F22, F23, F24, F25, F26,
                                     F27, F28, F29, F30, F31, CR2, CR3, CR4
                                )>;


def CSR_PPC64_Altivec : CalleeSavedRegs<(add CSR_PPC64, CSR_Altivec)>;

// _R2 variants additionally preserve X2 (the TOC pointer).
def CSR_PPC64_R2 : CalleeSavedRegs<(add CSR_PPC64, X2)>;

def CSR_PPC64_R2_Altivec : CalleeSavedRegs<(add CSR_PPC64_Altivec, X2)>;

def CSR_NoRegs : CalleeSavedRegs<(add)>;

// coldcc calling convection marks most registers as non-volatile.
// Do not include r1 since the stack pointer is never considered a CSR.
// Do not include r2, since it is the TOC register and is added depending
// on whether or not the function uses the TOC and is a non-leaf.
// Do not include r0,r11,r13 as they are optional in functional linkage
// and value may be altered by inter-library calls.
// Do not include r12 as it is used as a scratch register.
// Do not include return registers r3, f1, v2.
def CSR_SVR32_ColdCC_Common : CalleeSavedRegs<(add (sequence "R%u", 4, 10),
                                                   (sequence "R%u", 14, 31),
                                                   (sequence "CR%u", 0, 7))>;

def CSR_SVR32_ColdCC : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Common,
                                            F0, (sequence "F%u", 2, 31))>;


def CSR_SVR32_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR32_ColdCC,
                                                    (sequence "V%u", 0, 1),
                                                    (sequence "V%u", 3, 31))>;

def CSR_SVR32_ColdCC_SPE : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Common,
                                                (sequence "S%u", 4, 10),
                                                (sequence "S%u", 14, 31))>;

def CSR_SVR64_ColdCC : CalleeSavedRegs<(add (sequence "X%u", 4, 10),
                                            (sequence "X%u", 14, 31),
                                            F0, (sequence "F%u", 2, 31),
                                            (sequence "CR%u", 0, 7))>;

def CSR_SVR64_ColdCC_R2: CalleeSavedRegs<(add CSR_SVR64_ColdCC, X2)>;

def CSR_SVR64_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC,
                                                    (sequence "V%u", 0, 1),
                                                    (sequence "V%u", 3, 31))>;

def CSR_SVR64_ColdCC_R2_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC_Altivec, X2)>;

def CSR_64_AllRegs: CalleeSavedRegs<(add X0, (sequence "X%u", 3, 10),
                                         (sequence "X%u", 14, 31),
                                         (sequence "F%u", 0, 31),
                                         (sequence "CR%u", 0, 7))>;

def CSR_64_AllRegs_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
                                                  (sequence "V%u", 0, 31))>;

// AIX default (non-extended) vector ABI only preserves V0-V19.
def CSR_64_AllRegs_AIX_Dflt_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
                                                           (sequence "V%u", 0, 19))>;

def CSR_64_AllRegs_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
                                              (sequence "VSL%u", 0, 31))>;

def CSR_64_AllRegs_AIX_Dflt_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
                                                       (sequence "VSL%u", 0, 19))>;

def CSR_ALL_VSRP : CalleeSavedRegs<(sequence "VSRp%u", 0, 31)>;

def CSR_VSRP :
  CalleeSavedRegs<(add VSRp26, VSRp27, VSRp28, VSRp29, VSRp30, VSRp31)>;

def CSR_SVR432_VSRP : CalleeSavedRegs<(add CSR_SVR432_Altivec, CSR_VSRP)>;

def CSR_SVR464_VSRP : CalleeSavedRegs<(add CSR_PPC64_Altivec, CSR_VSRP)>;

def CSR_SVR464_R2_VSRP : CalleeSavedRegs<(add CSR_SVR464_VSRP, X2)>;

// ColdCC VSRP sets exclude VSRp17 (overlaps the V2/F1 return registers).
def CSR_SVR32_ColdCC_VSRP : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Altivec,
                                                 (sub CSR_ALL_VSRP, VSRp17))>;

def CSR_SVR64_ColdCC_VSRP : CalleeSavedRegs<(add CSR_SVR64_ColdCC,
                                                 (sub CSR_ALL_VSRP, VSRp17))>;

def CSR_SVR64_ColdCC_R2_VSRP : CalleeSavedRegs<(add CSR_SVR64_ColdCC_VSRP, X2)>;

def CSR_64_AllRegs_VSRP :
  CalleeSavedRegs<(add CSR_64_AllRegs_VSX, CSR_ALL_VSRP)>;

def CSR_AIX64_VSRP : CalleeSavedRegs<(add CSR_PPC64_Altivec, CSR_VSRP)>;

def CSR_AIX64_R2_VSRP : CalleeSavedRegs<(add CSR_AIX64_VSRP, X2)>;

def CSR_AIX32_VSRP : CalleeSavedRegs<(add CSR_AIX32_Altivec, CSR_VSRP)>;