//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CCState class, used for lowering and implementing
// calling conventions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C)
    : CallingConv(CC), IsVarArg(isVarArg), MF(mf),
      TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C) {
  // No stack is used.
  StackOffset = 0;

  clearByValRegsInfo();
  UsedRegs.resize((TRI.getNumRegs() + 31) / 32);
}

/// Allocate space on the stack large enough to pass an argument by value.
/// The size and alignment information of the argument is encoded in
/// its parameter attribute.
void CCState::HandleByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo, int MinSize,
                          Align MinAlign, ISD::ArgFlagsTy ArgFlags) {
  Align Alignment = ArgFlags.getNonZeroByValAlign();
  unsigned Size = ArgFlags.getByValSize();
  if (MinSize > (int)Size)
    Size = MinSize;
  if (MinAlign > Alignment)
    Alignment = MinAlign;
  ensureMaxAlignment(Alignment);
  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Alignment);
  Size = unsigned(alignTo(Size, MinAlign));
  unsigned Offset = AllocateStack(Size, Alignment);
  addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}

/// Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(MCPhysReg Reg) {
  for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
    UsedRegs[*AI / 32] |= 1 << (*AI & 31);
}

void CCState::MarkUnallocated(MCPhysReg Reg) {
  for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
    UsedRegs[*AI / 32] &= ~(1 << (*AI & 31));
}

bool CCState::IsShadowAllocatedReg(MCRegister Reg) const {
  if (!isAllocated(Reg))
    return false;

  for (auto const &ValAssign : Locs)
    if (ValAssign.isRegLoc() && TRI.regsOverlap(ValAssign.getLocReg(), Reg))
      return false;
  return true;
}
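// Example of the UsedRegs encoding used by the allocation helpers above: the
// vector is sized as (TRI.getNumRegs() + 31) / 32 words of 32 bits, so a
// physical register numbered 40 occupies bit 8 (40 & 31) of word 1 (40 / 32).
// MarkAllocated sets that bit for the register and all of its aliases,
// MarkUnallocated clears it, and isAllocated tests it.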
/// Analyze an array of argument values,
/// incorporating info about the formals into this state.
void
CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  unsigned NumArgs = Ins.size();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this))
      report_fatal_error("unable to allocate function argument #" + Twine(i));
  }
}

/// Analyze the return values of a function, returning true if the return can
/// be performed without sret-demotion and false otherwise.
bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                          CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      return false;
  }
  return true;
}

/// Analyze the returned values of a return,
/// incorporating info about the result values into this state.
void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                            CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      report_fatal_error("unable to allocate function return #" + Twine(i));
  }
}

/// Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  CCAssignFn Fn) {
  unsigned NumOps = Outs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                                  CCAssignFn Fn) {
  unsigned NumOps = ArgVTs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = ArgVTs[i];
    ISD::ArgFlagsTy ArgFlags = Flags[i];
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the return values of a call, incorporating info about the passed
/// values into this state.
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    MVT VT = Ins[i].VT;
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call result #" << i << " has unhandled type "
             << EVT(VT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}
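// Usage sketch: a target's call lowering typically drives the Analyze*
// entry points above along these lines, where CC_MyTarget stands in for a
// hypothetical TableGen-generated CCAssignFn (each target supplies its own):
//
//   SmallVector<CCValAssign, 16> ArgLocs;
//   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
//   CCInfo.AnalyzeCallOperands(Outs, CC_MyTarget);
//   for (const CCValAssign &VA : ArgLocs) {
//     if (VA.isRegLoc())
//       ; // copy the argument into VA.getLocReg()
//     else
//       ; // store it at stack offset VA.getLocMemOffset()
//   }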
/// Same as above except it's specialized for calls that produce a single value.
void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
  if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
    dbgs() << "Call result has unhandled type "
           << EVT(VT).getEVTString() << '\n';
#endif
    llvm_unreachable(nullptr);
  }
}

void CCState::ensureMaxAlignment(Align Alignment) {
  if (!AnalyzingMustTailForwardedRegs)
    MF.getFrameInfo().ensureMaxAlignment(Alignment);
}

static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
  if (VT.isVector())
    return true; // Assume -msse-regparm might be in effect.
  if (!VT.isInteger())
    return false;
  return (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall);
}

void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
                                          MVT VT, CCAssignFn Fn) {
  unsigned SavedStackOffset = StackOffset;
  Align SavedMaxStackArgAlign = MaxStackArgAlign;
  unsigned NumLocs = Locs.size();

  // Set the 'inreg' flag if it is used for this calling convention.
  ISD::ArgFlagsTy Flags;
  if (isValueTypeInRegForCC(CallingConv, VT))
    Flags.setInReg();

  // Allocate something of this value type repeatedly until we get assigned a
  // location in memory.
  bool HaveRegParm;
  do {
    if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call has unhandled type " << EVT(VT).getEVTString()
             << " while computing remaining regparms\n";
#endif
      llvm_unreachable(nullptr);
    }
    HaveRegParm = Locs.back().isRegLoc();
  } while (HaveRegParm);

  // Copy all the registers from the value locations we added.
  assert(NumLocs < Locs.size() && "CC assignment failed to add location");
  for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
    if (Locs[I].isRegLoc())
      Regs.push_back(MCPhysReg(Locs[I].getLocReg()));

  // Clear the assigned values and stack memory. We leave the registers marked
  // as allocated so that future queries don't return the same registers, i.e.
  // when i64 and f64 are both passed in GPRs.
  StackOffset = SavedStackOffset;
  MaxStackArgAlign = SavedMaxStackArgAlign;
  Locs.truncate(NumLocs);
}

void CCState::analyzeMustTailForwardedRegisters(
    SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
    CCAssignFn Fn) {
  // Oftentimes calling conventions will not use register parameters for
  // variadic functions, so we need to assume we're not variadic so that we get
  // all the registers that might be used in a non-variadic call.
  SaveAndRestore SavedVarArg(IsVarArg, false);
  SaveAndRestore SavedMustTail(AnalyzingMustTailForwardedRegs, true);

  for (MVT RegVT : RegParmTypes) {
    SmallVector<MCPhysReg, 8> RemainingRegs;
    getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
    const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
    const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
    for (MCPhysReg PReg : RemainingRegs) {
      Register VReg = MF.addLiveIn(PReg, RC);
      Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
    }
  }
}

bool CCState::resultsCompatible(CallingConv::ID CalleeCC,
                                CallingConv::ID CallerCC, MachineFunction &MF,
                                LLVMContext &C,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn CalleeFn, CCAssignFn CallerFn) {
  if (CalleeCC == CallerCC)
    return true;
  SmallVector<CCValAssign, 4> RVLocs1;
  CCState CCInfo1(CalleeCC, false, MF, RVLocs1, C);
  CCInfo1.AnalyzeCallResult(Ins, CalleeFn);

  SmallVector<CCValAssign, 4> RVLocs2;
  CCState CCInfo2(CallerCC, false, MF, RVLocs2, C);
  CCInfo2.AnalyzeCallResult(Ins, CallerFn);

  auto AreCompatible = [](const CCValAssign &Loc1, const CCValAssign &Loc2) {
    assert(!Loc1.isPendingLoc() && !Loc2.isPendingLoc() &&
           "The location must have been decided by now");
    // Must fill the same part of their locations.
    if (Loc1.getLocInfo() != Loc2.getLocInfo())
      return false;
    // Must both be in the same registers, or both in memory at the same offset.
    if (Loc1.isRegLoc() && Loc2.isRegLoc())
      return Loc1.getLocReg() == Loc2.getLocReg();
    if (Loc1.isMemLoc() && Loc2.isMemLoc())
      return Loc1.getLocMemOffset() == Loc2.getLocMemOffset();
    llvm_unreachable("Unknown location kind");
  };

  return std::equal(RVLocs1.begin(), RVLocs1.end(), RVLocs2.begin(),
                    RVLocs2.end(), AreCompatible);
}
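// Usage sketch: targets commonly consult resultsCompatible when deciding
// whether a call is eligible for tail-call optimization, roughly as follows,
// where RetCC_MyTarget stands in for a hypothetical return-value CCAssignFn:
//
//   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
//                                   RetCC_MyTarget, RetCC_MyTarget))
//     return false; // caller and callee would place results differently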