//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CCState class, used for lowering and implementing
// calling conventions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C)
    : CallingConv(CC), IsVarArg(isVarArg), MF(mf),
      TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C) {
  // No stack is used.
  StackOffset = 0;
  MaxStackArgAlign = 1;

  clearByValRegsInfo();
  UsedRegs.resize((TRI.getNumRegs()+31)/32);
}

/// Allocate space on the stack large enough to pass an argument by value.
/// The size and alignment information of the argument is encoded in
/// its parameter attribute.
void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
                          MVT LocVT, CCValAssign::LocInfo LocInfo,
                          int MinSize, int MinAlign,
                          ISD::ArgFlagsTy ArgFlags) {
  unsigned Align = ArgFlags.getByValAlign();
  unsigned Size = ArgFlags.getByValSize();
  if (MinSize > (int)Size)
    Size = MinSize;
  if (MinAlign > (int)Align)
    Align = MinAlign;
  ensureMaxAlignment(Align);
  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align);
  Size = unsigned(alignTo(Size, MinAlign));
  unsigned Offset = AllocateStack(Size, Align);
  addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}

/// Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(unsigned Reg) {
  for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
    UsedRegs[*AI/32] |= 1 << (*AI&31);
}

bool CCState::IsShadowAllocatedReg(unsigned Reg) const {
  if (!isAllocated(Reg))
    return false;

  for (auto const &ValAssign : Locs) {
    if (ValAssign.isRegLoc()) {
      for (MCRegAliasIterator AI(ValAssign.getLocReg(), &TRI, true);
           AI.isValid(); ++AI) {
        if (*AI == Reg)
          return false;
      }
    }
  }
  return true;
}
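
// Illustrative sketch only: the CCAssignFn callbacks consumed by the analysis
// routines below are normally generated by TableGen from a target's
// CallingConv.td, but a hand-written one interacts with this class roughly as
// follows. The register list and the stack slot size/alignment are
// hypothetical, not taken from any in-tree target.
//
//   static bool CC_Hypothetical(unsigned ValNo, MVT ValVT, MVT LocVT,
//                               CCValAssign::LocInfo LocInfo,
//                               ISD::ArgFlagsTy ArgFlags, CCState &State) {
//     static const MCPhysReg ArgRegs[] = { /* target argument GPRs */ };
//     if (unsigned Reg = State.AllocateReg(ArgRegs)) {
//       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
//       return false; // Handled in a register.
//     }
//     unsigned Offset = State.AllocateStack(4, 4);
//     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
//     return false; // Handled on the stack; returning true signals failure.
//   }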

/// Analyze an array of argument values,
/// incorporating info about the formals into this state.
void
CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  unsigned NumArgs = Ins.size();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Formal argument #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the return values of a function, returning true if the return can
/// be performed without sret-demotion and false otherwise.
bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                          CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      return false;
  }
  return true;
}

/// Analyze the return values of a return instruction,
/// incorporating info about the result values into this state.
void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                            CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Return operand #" << i << " has unhandled type "
             << EVT(VT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  CCAssignFn Fn) {
  unsigned NumOps = Outs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                                  CCAssignFn Fn) {
  unsigned NumOps = ArgVTs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = ArgVTs[i];
    ISD::ArgFlagsTy ArgFlags = Flags[i];
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}
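
// Illustrative sketch only: a target's LowerCall typically drives the
// AnalyzeCallOperands entry points above and then sizes its outgoing argument
// area from the resulting state. CC_Hypothetical is a stand-in name for a
// target-provided assignment function, and DAG is assumed to be the usual
// SelectionDAG available at that point.
//
//   SmallVector<CCValAssign, 16> ArgLocs;
//   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
//   CCInfo.AnalyzeCallOperands(Outs, CC_Hypothetical);
//   unsigned NumBytes = CCInfo.getNextStackOffset(); // Outgoing stack space.
//   for (CCValAssign &VA : ArgLocs) {
//     if (VA.isRegLoc()) { /* copy the argument into VA.getLocReg()   */ }
//     else               { /* store it at VA.getLocMemOffset()        */ }
//   }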

/// Analyze the return values of a call, incorporating info about the passed
/// values into this state.
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    MVT VT = Ins[i].VT;
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call result #" << i << " has unhandled type "
             << EVT(VT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it's specialized for calls that produce a single value.
void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
  if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
    dbgs() << "Call result has unhandled type "
           << EVT(VT).getEVTString() << '\n';
#endif
    llvm_unreachable(nullptr);
  }
}

static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
  if (VT.isVector())
    return true; // Assume -msse-regparm might be in effect.
  if (!VT.isInteger())
    return false;
  if (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall)
    return true;
  return false;
}

void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
                                          MVT VT, CCAssignFn Fn) {
  unsigned SavedStackOffset = StackOffset;
  unsigned SavedMaxStackArgAlign = MaxStackArgAlign;
  unsigned NumLocs = Locs.size();

  // Set the 'inreg' flag if it is used for this calling convention.
  ISD::ArgFlagsTy Flags;
  if (isValueTypeInRegForCC(CallingConv, VT))
    Flags.setInReg();

  // Allocate something of this value type repeatedly until we get assigned a
  // location in memory.
  bool HaveRegParm = true;
  while (HaveRegParm) {
    if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call has unhandled type " << EVT(VT).getEVTString()
             << " while computing remaining regparms\n";
#endif
      llvm_unreachable(nullptr);
    }
    HaveRegParm = Locs.back().isRegLoc();
  }

  // Copy all the registers from the value locations we added.
  assert(NumLocs < Locs.size() && "CC assignment failed to add location");
  for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
    if (Locs[I].isRegLoc())
      Regs.push_back(MCPhysReg(Locs[I].getLocReg()));

  // Clear the assigned values and stack memory. We leave the registers marked
  // as allocated so that future queries don't return the same registers, i.e.
  // when i64 and f64 are both passed in GPRs.
  StackOffset = SavedStackOffset;
  MaxStackArgAlign = SavedMaxStackArgAlign;
  Locs.resize(NumLocs);
}
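
// Illustrative sketch only: after AnalyzeCallResult above, a target's
// LowerCallResult usually copies each assigned location out of the call.
// RetCC_Hypothetical is a stand-in for a target-provided return-value
// assignment function.
//
//   SmallVector<CCValAssign, 16> RVLocs;
//   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
//   CCInfo.AnalyzeCallResult(Ins, RetCC_Hypothetical);
//   for (CCValAssign &VA : RVLocs)
//     /* CopyFromReg(Chain, VA.getLocReg(), VA.getLocVT()) if VA.isRegLoc(),
//        otherwise load from VA.getLocMemOffset() */;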

void CCState::analyzeMustTailForwardedRegisters(
    SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
    CCAssignFn Fn) {
  // Oftentimes calling conventions will not use register parameters for
  // variadic functions, so we need to assume we're not variadic so that we get
  // all the registers that might be used in a non-variadic call.
  SaveAndRestore<bool> SavedVarArg(IsVarArg, false);
  SaveAndRestore<bool> SavedMustTail(AnalyzingMustTailForwardedRegs, true);

  for (MVT RegVT : RegParmTypes) {
    SmallVector<MCPhysReg, 8> RemainingRegs;
    getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
    const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
    const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
    for (MCPhysReg PReg : RemainingRegs) {
      unsigned VReg = MF.addLiveIn(PReg, RC);
      Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
    }
  }
}

bool CCState::resultsCompatible(CallingConv::ID CalleeCC,
                                CallingConv::ID CallerCC, MachineFunction &MF,
                                LLVMContext &C,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn CalleeFn, CCAssignFn CallerFn) {
  if (CalleeCC == CallerCC)
    return true;
  SmallVector<CCValAssign, 4> RVLocs1;
  CCState CCInfo1(CalleeCC, false, MF, RVLocs1, C);
  CCInfo1.AnalyzeCallResult(Ins, CalleeFn);

  SmallVector<CCValAssign, 4> RVLocs2;
  CCState CCInfo2(CallerCC, false, MF, RVLocs2, C);
  CCInfo2.AnalyzeCallResult(Ins, CallerFn);

  if (RVLocs1.size() != RVLocs2.size())
    return false;
  for (unsigned I = 0, E = RVLocs1.size(); I != E; ++I) {
    const CCValAssign &Loc1 = RVLocs1[I];
    const CCValAssign &Loc2 = RVLocs2[I];
    if (Loc1.getLocInfo() != Loc2.getLocInfo())
      return false;
    bool RegLoc1 = Loc1.isRegLoc();
    if (RegLoc1 != Loc2.isRegLoc())
      return false;
    if (RegLoc1) {
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;
    } else {
      if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
        return false;
    }
  }
  return true;
}
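
// Illustrative sketch only: targets typically consult resultsCompatible() as
// one of the legality checks when deciding whether a call may be tail-called.
// RetCC_Hypothetical again stands in for the target's return-value CCAssignFn.
//
//   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Context, Ins,
//                                   RetCC_Hypothetical, RetCC_Hypothetical))
//     return false; // The callee would return values in different locations
//                   // than the caller expects, so a tail call is unsafe.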