xref: /freebsd/contrib/llvm-project/llvm/lib/CodeGen/CallingConvLower.cpp (revision 06c3fb2749bda94cb5201f81ffdb8fa6c3161b2e)
//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CCState class, used for lowering and implementing
// calling conventions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

CCState::CCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
                 SmallVectorImpl<CCValAssign> &Locs, LLVMContext &Context,
                 bool NegativeOffsets)
    : CallingConv(CC), IsVarArg(IsVarArg), MF(MF),
      TRI(*MF.getSubtarget().getRegisterInfo()), Locs(Locs), Context(Context),
      NegativeOffsets(NegativeOffsets) {

  // No stack is used.
  StackSize = 0;

  clearByValRegsInfo();
  // One bit per physical register, packed into 32-bit words.
  UsedRegs.resize((TRI.getNumRegs()+31)/32);
}

/// Allocate space on the stack large enough to pass an argument by value.
/// The size and alignment information of the argument is encoded in
/// its parameter attribute.
void CCState::HandleByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo, int MinSize,
                          Align MinAlign, ISD::ArgFlagsTy ArgFlags) {
  Align Alignment = ArgFlags.getNonZeroByValAlign();
  unsigned Size = ArgFlags.getByValSize();
  if (MinSize > (int)Size)
    Size = MinSize;
  if (MinAlign > Alignment)
    Alignment = MinAlign;
  ensureMaxAlignment(Alignment);
  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Alignment);
  Size = unsigned(alignTo(Size, MinAlign));
  uint64_t Offset = AllocateStack(Size, Alignment);
  addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}

/// Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(MCPhysReg Reg) {
  for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
    UsedRegs[*AI / 32] |= 1 << (*AI & 31);
}

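/// Mark a register and all of its aliases as unallocated.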
void CCState::MarkUnallocated(MCPhysReg Reg) {
  for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
    UsedRegs[*AI / 32] &= ~(1 << (*AI & 31));
}

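/// A shadow allocated register is one that was marked allocated but never
/// added to the location list (Locs), e.g. because it only shadows another
/// register's allocation.
/// \returns true if \p Reg was allocated as a shadow register.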
bool CCState::IsShadowAllocatedReg(MCRegister Reg) const {
  if (!isAllocated(Reg))
    return false;

  for (auto const &ValAssign : Locs)
    if (ValAssign.isRegLoc() && TRI.regsOverlap(ValAssign.getLocReg(), Reg))
      return false;
  return true;
}

/// Analyze an array of argument values,
/// incorporating info about the formals into this state.
void
CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  unsigned NumArgs = Ins.size();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this))
      report_fatal_error("unable to allocate function argument #" + Twine(i));
  }
}

/// Analyze the return values of a function, returning true if the return can
/// be performed without sret-demotion and false otherwise.
bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                          CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      return false;
  }
  return true;
}

/// Analyze the return values of a return, incorporating info about the result
/// values into this state.
void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                            CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      report_fatal_error("unable to allocate function return #" + Twine(i));
  }
}

/// Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  CCAssignFn Fn) {
  unsigned NumOps = Outs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << ArgVT << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                                  CCAssignFn Fn) {
  unsigned NumOps = ArgVTs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = ArgVTs[i];
    ISD::ArgFlagsTy ArgFlags = Flags[i];
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << ArgVT << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the return values of a call, incorporating info about the passed
/// values into this state.
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    MVT VT = Ins[i].VT;
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call result #" << i << " has unhandled type "
             << VT << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it's specialized for calls that produce a single value.
void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
  if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
    dbgs() << "Call result has unhandled type "
           << VT << '\n';
#endif
    llvm_unreachable(nullptr);
  }
}

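/// Make sure the function's stack frame is aligned to at least \p Alignment.
/// This is a no-op while analyzing musttail forwarded registers, so that the
/// speculative register probing does not change the real frame layout.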
void CCState::ensureMaxAlignment(Align Alignment) {
  if (!AnalyzingMustTailForwardedRegs)
    MF.getFrameInfo().ensureMaxAlignment(Alignment);
}

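/// Return true if a value of type \p VT may be passed in a register for
/// calling convention \p CC, and should therefore be probed with the 'inreg'
/// flag set when computing the remaining register parameters.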
static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
  if (VT.isVector())
    return true; // Assume -msse-regparm might be in effect.
  if (!VT.isInteger())
    return false;
  return (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall);
}

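/// Compute the remaining unused register parameters that would be available
/// for a value of type \p VT, by repeatedly invoking \p Fn until the value is
/// assigned a stack location. The probed locations and stack usage are rolled
/// back afterwards, but the registers remain marked as allocated.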
void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
                                          MVT VT, CCAssignFn Fn) {
  uint64_t SavedStackSize = StackSize;
  Align SavedMaxStackArgAlign = MaxStackArgAlign;
  unsigned NumLocs = Locs.size();

  // Set the 'inreg' flag if it is used for this calling convention.
  ISD::ArgFlagsTy Flags;
  if (isValueTypeInRegForCC(CallingConv, VT))
    Flags.setInReg();

  // Allocate something of this value type repeatedly until we get assigned a
  // location in memory.
  bool HaveRegParm;
  do {
    if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call has unhandled type " << VT
             << " while computing remaining regparms\n";
#endif
      llvm_unreachable(nullptr);
    }
    HaveRegParm = Locs.back().isRegLoc();
  } while (HaveRegParm);

  // Copy all the registers from the value locations we added.
  assert(NumLocs < Locs.size() && "CC assignment failed to add location");
  for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
    if (Locs[I].isRegLoc())
      Regs.push_back(MCPhysReg(Locs[I].getLocReg()));

  // Clear the assigned values and stack memory. We leave the registers marked
  // as allocated so that future queries don't return the same registers, i.e.
  // when i64 and f64 are both passed in GPRs.
  StackSize = SavedStackSize;
  MaxStackArgAlign = SavedMaxStackArgAlign;
  Locs.truncate(NumLocs);
}

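/// Compute the set of registers that need to be preserved and forwarded to
/// any musttail call in this function. Each register is added as a function
/// live-in and recorded in \p Forwards; \p RegParmTypes lists the value types
/// to probe for remaining register parameters.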
void CCState::analyzeMustTailForwardedRegisters(
    SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
    CCAssignFn Fn) {
  // Oftentimes calling conventions will not use register parameters for
  // variadic functions, so we need to assume we're not variadic so that we get
  // all the registers that might be used in a non-variadic call.
  SaveAndRestore SavedVarArg(IsVarArg, false);
  SaveAndRestore SavedMustTail(AnalyzingMustTailForwardedRegs, true);

  for (MVT RegVT : RegParmTypes) {
    SmallVector<MCPhysReg, 8> RemainingRegs;
    getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
    const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
    const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
    for (MCPhysReg PReg : RemainingRegs) {
      Register VReg = MF.addLiveIn(PReg, RC);
      Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
    }
  }
}

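/// Return true if the return values described by \p Ins would be assigned to
/// the same locations (registers or stack offsets) under both the callee and
/// the caller calling conventions, typically as part of checking tail-call
/// eligibility.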
bool CCState::resultsCompatible(CallingConv::ID CalleeCC,
                                CallingConv::ID CallerCC, MachineFunction &MF,
                                LLVMContext &C,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn CalleeFn, CCAssignFn CallerFn) {
  if (CalleeCC == CallerCC)
    return true;
  SmallVector<CCValAssign, 4> RVLocs1;
  CCState CCInfo1(CalleeCC, false, MF, RVLocs1, C);
  CCInfo1.AnalyzeCallResult(Ins, CalleeFn);

  SmallVector<CCValAssign, 4> RVLocs2;
  CCState CCInfo2(CallerCC, false, MF, RVLocs2, C);
  CCInfo2.AnalyzeCallResult(Ins, CallerFn);

  auto AreCompatible = [](const CCValAssign &Loc1, const CCValAssign &Loc2) {
    assert(!Loc1.isPendingLoc() && !Loc2.isPendingLoc() &&
           "The location must have been decided by now");
    // Must fill the same part of their locations.
    if (Loc1.getLocInfo() != Loc2.getLocInfo())
      return false;
    // Must both be in the same registers, or both in memory at the same offset.
    if (Loc1.isRegLoc() && Loc2.isRegLoc())
      return Loc1.getLocReg() == Loc2.getLocReg();
    if (Loc1.isMemLoc() && Loc2.isMemLoc())
      return Loc1.getLocMemOffset() == Loc2.getLocMemOffset();
    llvm_unreachable("Unknown location kind");
  };

  return std::equal(RVLocs1.begin(), RVLocs1.end(), RVLocs2.begin(),
                    RVLocs2.end(), AreCompatible);
}
293