//===- PPCCallingConv.td - Calling Conventions for PowerPC -*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the PowerPC 32- and 64-bit
// architectures.
//
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
 : CCIf<!strconcat("static_cast<const PPCSubtarget&>"
                   "(State.getMachineFunction().getSubtarget()).",
                   F),
        A>;

/// CCIfNotSubtarget - Match if the current subtarget does NOT have feature F.
class CCIfNotSubtarget<string F, CCAction A>
 : CCIf<!strconcat("!static_cast<const PPCSubtarget&>"
                   "(State.getMachineFunction().getSubtarget()).",
                   F),
        A>;

/// CCIfOrigArgWasNotPPCF128 - Match if the original (pre-legalization) IR
/// argument type of this value was not ppc_fp128, as recorded in PPCCCState.
class CCIfOrigArgWasNotPPCF128<CCAction A>
 : CCIf<"!static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
        A>;

/// CCIfOrigArgWasPPCF128 - Match if the original IR argument type of this
/// value was ppc_fp128.
class CCIfOrigArgWasPPCF128<CCAction A>
 : CCIf<"static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
        A>;
//===----------------------------------------------------------------------===//
// Return Value Calling Convention
//===----------------------------------------------------------------------===//

// PPC64 AnyReg return-value convention. No explicit register is specified for
// the return-value. The register allocator is allowed and expected to choose
// any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the PPC C calling convention.
def RetCC_PPC64_AnyReg : CallingConv<[
  CCCustom<"CC_PPC_AnyReg_Error">
]>;

// Return-value convention for PowerPC coldcc.
let Entry = 1 in
def RetCC_PPC_Cold : CallingConv<[
  // Use the same return registers as RetCC_PPC, but limited to only
  // one return value. The remaining return values will be saved to
  // the stack.
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,

  CCIfType<[i32], CCAssignToReg<[R3]>>,
  CCIfType<[i64], CCAssignToReg<[X3]>>,
  CCIfType<[i128], CCAssignToReg<[X3]>>,

  CCIfType<[f32], CCAssignToReg<[F1]>>,
  CCIfType<[f64], CCAssignToReg<[F1]>>,
  CCIfType<[f128], CCIfSubtarget<"hasP9Vector()", CCAssignToReg<[V2]>>>,

  CCIfType<[v4f64, v4f32, v4i1],
           CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1]>>>,

  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2]>>>
]>;

// Return-value convention for PowerPC
let Entry = 1 in
def RetCC_PPC : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  // On PPC64, integer return values are always promoted to i64
  CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
  CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,

  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,

  // Floating point types returned as "direct" go into F1 .. F8; note that
  // only the ELFv2 ABI fully utilizes all these registers.
  CCIfNotSubtarget<"hasSPE()",
       CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfNotSubtarget<"hasSPE()",
       CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  // With SPE, floats live in GPRs; f64 is split across a GPR pair by a
  // custom handler.
  CCIfSubtarget<"hasSPE()",
       CCIfType<[f32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,
  CCIfSubtarget<"hasSPE()",
       CCIfType<[f64], CCCustom<"CC_PPC32_SPE_RetF64">>>,

  // For P9, f128 are passed in vector registers.
  CCIfType<[f128],
           CCIfSubtarget<"hasP9Vector()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,

  // QPX vectors are returned in QF1 and QF2.
  CCIfType<[v4f64, v4f32, v4i1],
           CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1, QF2]>>>,

  // Vector types returned as "direct" go into V2 .. V9; note that only the
  // ELFv2 ABI fully utilizes all these registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
]>;
// No explicit register is specified for the AnyReg calling convention. The
// register allocator may assign the arguments to any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fallback to the PPC C calling convention.
def CC_PPC64_AnyReg : CallingConv<[
  CCCustom<"CC_PPC_AnyReg_Error">
]>;

// Note that we don't currently have calling conventions for 64-bit
// PowerPC, but handle all the complexities of the ABI in the lowering
// logic. FIXME: See if the logic can be simplified with use of CCs.
// This may require some extensions to current table generation.

// Simple calling convention for 64-bit ELF PowerPC fast isel.
// Only handle ints and floats. All ints are promoted to i64.
// Vector types and quadword ints are not handled.
let Entry = 1 in
def CC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_PPC64_AnyReg>>,

  CCIfType<[i1],  CCPromoteToType<i64>>,
  CCIfType<[i8],  CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6, X7, X8, X9, X10]>>,
  CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>
]>;
// Simple return-value convention for 64-bit ELF PowerPC fast isel.
// All small ints are promoted to i64. Vector types, quadword ints,
// and multiple register returns are "supported" to avoid compile
// errors, but none are handled by the fast selector.
let Entry = 1 in
def RetCC_PPC64_ELF_FIS : CallingConv<[
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,

  CCIfType<[i1],  CCPromoteToType<i64>>,
  CCIfType<[i8],  CCPromoteToType<i64>>,
  CCIfType<[i16], CCPromoteToType<i64>>,
  CCIfType<[i32], CCPromoteToType<i64>>,
  CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
  CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
  CCIfType<[f128],
           CCIfSubtarget<"hasP9Vector()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
  CCIfType<[v4f64, v4f32, v4i1],
           CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1, QF2]>>>,
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()",
                         CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>
]>;

//===----------------------------------------------------------------------===//
// PowerPC System V Release 4 32-bit ABI
//===----------------------------------------------------------------------===//

// Argument-assignment rules shared by the fixed and variadic portions of the
// 32-bit SVR4 parameter list. Vector arguments are handled by the delegating
// conventions below before control reaches here.
def CC_PPC32_SVR4_Common : CallingConv<[
  CCIfType<[i1], CCPromoteToType<i32>>,

  // The ABI requires i64 to be passed in two adjacent registers with the first
  // register having an odd register number.
  CCIfType<[i32],
           CCIfSplit<CCIfSubtarget<"useSoftFloat()",
                     CCIfOrigArgWasNotPPCF128<
                     CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>>,

  CCIfType<[i32],
           CCIfSplit<CCIfNotSubtarget<"useSoftFloat()",
                     CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>,
  CCIfType<[f64],
           CCIfSubtarget<"hasSPE()",
                         CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>,
  CCIfSplit<CCIfSubtarget<"useSoftFloat()",
            CCIfOrigArgWasPPCF128<CCCustom<
            "CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128">>>>,

  // The 'nest' parameter, if any, is passed in R11.
  CCIfNest<CCAssignToReg<[R11]>>,

  // The first 8 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,

  // Make sure the i64 words from a long double are either both passed in
  // registers or both passed on the stack.
  CCIfType<[f64], CCIfSplit<CCCustom<"CC_PPC32_SVR4_Custom_AlignFPArgRegs">>>,

  // FP values are passed in F1 - F8.
  CCIfType<[f32, f64],
           CCIfNotSubtarget<"hasSPE()",
                            CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>>,
  CCIfType<[f64],
           CCIfSubtarget<"hasSPE()",
                         CCCustom<"CC_PPC32_SPE_CustomSplitFP64">>>,
  CCIfType<[f32],
           CCIfSubtarget<"hasSPE()",
                         CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>>,

  // Split arguments have an alignment of 8 bytes on the stack.
  CCIfType<[i32], CCIfSplit<CCAssignToStack<4, 8>>>,

  CCIfType<[i32], CCAssignToStack<4, 4>>,

  // Floats are stored in double precision format, thus they have the same
  // alignment and size as doubles.
  // With SPE floats are stored as single precision, so have alignment and
  // size of int.
  CCIfType<[f32,f64], CCIfNotSubtarget<"hasSPE()", CCAssignToStack<8, 8>>>,
  CCIfType<[f32], CCIfSubtarget<"hasSPE()", CCAssignToStack<4, 4>>>,
  CCIfType<[f64], CCIfSubtarget<"hasSPE()", CCAssignToStack<8, 8>>>,

  // QPX vectors that are stored in double precision need 32-byte alignment.
  CCIfType<[v4f64, v4i1], CCAssignToStack<32, 32>>,

  // Vectors and float128 get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>,
  CCIfType<[f128], CCIfSubtarget<"hasP9Vector()", CCAssignToStack<16, 16>>>
]>;

// This calling convention puts vector arguments always on the stack. It is used
// to assign vector arguments which belong to the variable portion of the
// parameter list of a variable argument function.
let Entry = 1 in
def CC_PPC32_SVR4_VarArg : CallingConv<[
  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;

// In contrast to CC_PPC32_SVR4_VarArg, this calling convention first tries to
// put vector arguments in vector registers before putting them on the stack.
let Entry = 1 in
def CC_PPC32_SVR4 : CallingConv<[
  // QPX vectors mirror the scalar FP convention.
  CCIfType<[v4f64, v4f32, v4i1], CCIfSubtarget<"hasQPX()",
           CCAssignToReg<[QF1, QF2, QF3, QF4, QF5, QF6, QF7, QF8]>>>,

  // The first 12 Vector arguments are passed in AltiVec registers.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32, v2f64],
           CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                                        V8, V9, V10, V11, V12,
                                                        V13]>>>,

  // Float128 types treated as vector arguments.
  CCIfType<[f128],
           CCIfSubtarget<"hasP9Vector()", CCAssignToReg<[V2, V3, V4, V5, V6, V7,
                                                         V8, V9, V10, V11, V12,
                                                         V13]>>>,

  CCDelegateTo<CC_PPC32_SVR4_Common>
]>;
// Helper "calling convention" to handle aggregate by value arguments.
// Aggregate by value arguments are always placed in the local variable space
// of the caller. This calling convention is only used to assign those stack
// offsets in the callers stack frame.
//
// Still, the address of the aggregate copy in the callers stack frame is passed
// in a GPR (or in the parameter list area if all GPRs are allocated) from the
// caller to the callee. The location for the address argument is assigned by
// the CC_PPC32_SVR4 calling convention.
//
// The only purpose of CC_PPC32_SVR4_Custom_Dummy is to skip arguments which are
// not passed by value.

let Entry = 1 in
def CC_PPC32_SVR4_ByVal : CallingConv<[
  CCIfByVal<CCPassByVal<4, 4>>,

  CCCustom<"CC_PPC32_SVR4_Custom_Dummy">
]>;

//===----------------------------------------------------------------------===//
// Callee-saved register lists
//===----------------------------------------------------------------------===//

// AltiVec callee-saved vector registers, shared by several CSR sets below.
def CSR_Altivec : CalleeSavedRegs<(add V20, V21, V22, V23, V24, V25, V26, V27,
                                       V28, V29, V30, V31)>;

def CSR_Darwin32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
                                        R21, R22, R23, R24, R25, R26, R27, R28,
                                        R29, R30, R31, F14, F15, F16, F17, F18,
                                        F19, F20, F21, F22, F23, F24, F25, F26,
                                        F27, F28, F29, F30, F31, CR2, CR3, CR4
                                   )>;

def CSR_Darwin32_Altivec : CalleeSavedRegs<(add CSR_Darwin32, CSR_Altivec)>;

// SPE does not use FPRs, so break out the common register set as base.
def CSR_SVR432_COMM : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20,
                                           R21, R22, R23, R24, R25, R26, R27,
                                           R28, R29, R30, R31, CR2, CR3, CR4
                                      )>;
def CSR_SVR432 : CalleeSavedRegs<(add CSR_SVR432_COMM, F14, F15, F16, F17, F18,
                                      F19, F20, F21, F22, F23, F24, F25, F26,
                                      F27, F28, F29, F30, F31
                                 )>;
def CSR_SPE : CalleeSavedRegs<(add S14, S15, S16, S17, S18, S19, S20, S21, S22,
                                   S23, S24, S25, S26, S27, S28, S29, S30, S31
                              )>;

def CSR_SVR432_Altivec : CalleeSavedRegs<(add CSR_SVR432, CSR_Altivec)>;

def CSR_SVR432_SPE : CalleeSavedRegs<(add CSR_SVR432_COMM, CSR_SPE)>;

def CSR_AIX32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
                                     R21, R22, R23, R24, R25, R26, R27, R28,
                                     R29, R30, R31, F14, F15, F16, F17, F18,
                                     F19, F20, F21, F22, F23, F24, F25, F26,
                                     F27, F28, F29, F30, F31, CR2, CR3, CR4
                                )>;

def CSR_Darwin64 : CalleeSavedRegs<(add X13, X14, X15, X16, X17, X18, X19, X20,
                                        X21, X22, X23, X24, X25, X26, X27, X28,
                                        X29, X30, X31, F14, F15, F16, F17, F18,
                                        F19, F20, F21, F22, F23, F24, F25, F26,
                                        F27, F28, F29, F30, F31, CR2, CR3, CR4
                                   )>;

def CSR_Darwin64_Altivec : CalleeSavedRegs<(add CSR_Darwin64, CSR_Altivec)>;

def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
                                      X21, X22, X23, X24, X25, X26, X27, X28,
                                      X29, X30, X31, F14, F15, F16, F17, F18,
                                      F19, F20, F21, F22, F23, F24, F25, F26,
                                      F27, F28, F29, F30, F31, CR2, CR3, CR4
                                 )>;

def CSR_AIX64 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
                                     X21, X22, X23, X24, X25, X26, X27, X28,
                                     X29, X30, X31, F14, F15, F16, F17, F18,
                                     F19, F20, F21, F22, F23, F24, F25, F26,
                                     F27, F28, F29, F30, F31, CR2, CR3, CR4
                                )>;

// CSRs that are handled by prologue, epilogue.
// NOTE(review): "SRV464" differs from the "SVR464" spelling used everywhere
// else in this file; the name is referenced as-is from C++ code, so confirm
// all users before renaming.
def CSR_SRV464_TLS_PE : CalleeSavedRegs<(add)>;

def CSR_SVR464_ViaCopy : CalleeSavedRegs<(add CSR_SVR464)>;

def CSR_SVR464_Altivec : CalleeSavedRegs<(add CSR_SVR464, CSR_Altivec)>;

def CSR_SVR464_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_Altivec)>;

def CSR_SVR464_R2 : CalleeSavedRegs<(add CSR_SVR464, X2)>;

def CSR_SVR464_R2_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2)>;

def CSR_SVR464_R2_Altivec : CalleeSavedRegs<(add CSR_SVR464_Altivec, X2)>;

def CSR_SVR464_R2_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2_Altivec)>;

def CSR_NoRegs : CalleeSavedRegs<(add)>;

// coldcc calling convection marks most registers as non-volatile.
// Do not include r1 since the stack pointer is never considered a CSR.
// Do not include r2, since it is the TOC register and is added depending
// on whether or not the function uses the TOC and is a non-leaf.
// Do not include r0,r11,r13 as they are optional in functional linkage
// and value may be altered by inter-library calls.
// Do not include r12 as it is used as a scratch register.
// Do not include return registers r3, f1, v2.
def CSR_SVR32_ColdCC_Common : CalleeSavedRegs<(add (sequence "R%u", 4, 10),
                                                   (sequence "R%u", 14, 31),
                                                   (sequence "CR%u", 0, 7))>;

def CSR_SVR32_ColdCC : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Common,
                                            F0, (sequence "F%u", 2, 31))>;

def CSR_SVR32_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR32_ColdCC,
                                                    (sequence "V%u", 0, 1),
                                                    (sequence "V%u", 3, 31))>;

def CSR_SVR32_ColdCC_SPE : CalleeSavedRegs<(add CSR_SVR32_ColdCC_Common,
                                                (sequence "S%u", 4, 10),
                                                (sequence "S%u", 14, 31))>;

def CSR_SVR64_ColdCC : CalleeSavedRegs<(add (sequence "X%u", 4, 10),
                                            (sequence "X%u", 14, 31),
                                            F0, (sequence "F%u", 2, 31),
                                            (sequence "CR%u", 0, 7))>;

def CSR_SVR64_ColdCC_R2: CalleeSavedRegs<(add CSR_SVR64_ColdCC, X2)>;

def CSR_SVR64_ColdCC_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC,
                                                    (sequence "V%u", 0, 1),
                                                    (sequence "V%u", 3, 31))>;

def CSR_SVR64_ColdCC_R2_Altivec : CalleeSavedRegs<(add CSR_SVR64_ColdCC_Altivec, X2)>;

def CSR_64_AllRegs: CalleeSavedRegs<(add X0, (sequence "X%u", 3, 10),
                                         (sequence "X%u", 14, 31),
                                         (sequence "F%u", 0, 31),
                                         (sequence "CR%u", 0, 7))>;

def CSR_64_AllRegs_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
                                                  (sequence "V%u", 0, 31))>;

def CSR_64_AllRegs_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
                                              (sequence "VSL%u", 0, 31))>;