//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CCState class, used for lowering and implementing
// calling conventions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;
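
// A rough sketch of how a target backend typically drives CCState when
// lowering formal arguments (illustrative only; CC_MyTarget stands in for a
// TableGen-generated CCAssignFn and is not defined in this file):
//
//   SmallVector<CCValAssign, 16> ArgLocs;
//   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
//   CCInfo.AnalyzeFormalArguments(Ins, CC_MyTarget);
//   for (CCValAssign &VA : ArgLocs) {
//     if (VA.isRegLoc())
//       ; // copy the incoming value from VA.getLocReg()
//     else
//       ; // load it from the stack slot at VA.getLocMemOffset()
//   }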

CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C)
    : CallingConv(CC), IsVarArg(isVarArg), MF(mf),
      TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C) {
  // No stack is used.
  StackOffset = 0;

  clearByValRegsInfo();
  // Size the UsedRegs bit vector: one bit per physical register, rounded up
  // to a whole number of 32-bit words.
  UsedRegs.resize((TRI.getNumRegs()+31)/32);
}

/// Allocate space on the stack large enough to pass an argument by value.
/// The size and alignment information of the argument is encoded in
/// its parameter attribute.
void CCState::HandleByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo, int MinSize,
                          Align MinAlign, ISD::ArgFlagsTy ArgFlags) {
  Align Alignment = ArgFlags.getNonZeroByValAlign();
  unsigned Size = ArgFlags.getByValSize();
  if (MinSize > (int)Size)
    Size = MinSize;
  if (MinAlign > Alignment)
    Alignment = MinAlign;
  ensureMaxAlignment(Alignment);
  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Alignment);
  Size = unsigned(alignTo(Size, MinAlign));
  unsigned Offset = AllocateStack(Size, Alignment);
  addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}

/// Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(MCPhysReg Reg) {
  for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
    UsedRegs[*AI / 32] |= 1 << (*AI & 31);
}
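
/// Return true if \p Reg is marked as allocated even though neither it nor
/// any of its aliases is the register of an assigned location in Locs. Such
/// a register was only "shadow" allocated (e.g. reserved alongside another
/// register) rather than used to pass a value.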
bool CCState::IsShadowAllocatedReg(MCRegister Reg) const {
  if (!isAllocated(Reg))
    return false;

  for (auto const &ValAssign : Locs) {
    if (ValAssign.isRegLoc()) {
      for (MCRegAliasIterator AI(ValAssign.getLocReg(), &TRI, true);
           AI.isValid(); ++AI) {
        if (*AI == Reg)
          return false;
      }
    }
  }
  return true;
}

/// Analyze an array of argument values,
/// incorporating info about the formals into this state.
void
CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  unsigned NumArgs = Ins.size();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this))
      report_fatal_error("unable to allocate function argument #" + Twine(i));
  }
}

/// Analyze the return values of a function, returning true if the return can
/// be performed without sret-demotion and false otherwise.
bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                          CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      return false;
  }
  return true;
}

/// Analyze the return values of a return statement, incorporating info about
/// the result values into this state.
void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                            CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      report_fatal_error("unable to allocate function return #" + Twine(i));
  }
}

/// Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  CCAssignFn Fn) {
  unsigned NumOps = Outs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                                  CCAssignFn Fn) {
  unsigned NumOps = ArgVTs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = ArgVTs[i];
    ISD::ArgFlagsTy ArgFlags = Flags[i];
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the return values of a call, incorporating info about the passed
/// values into this state.
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    MVT VT = Ins[i].VT;
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call result #" << i << " has unhandled type "
             << EVT(VT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it's specialized for calls that produce a single value.
void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
  if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
    dbgs() << "Call result has unhandled type "
           << EVT(VT).getEVTString() << '\n';
#endif
    llvm_unreachable(nullptr);
  }
}
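
/// Heuristically determine whether values of type \p VT may be passed in
/// registers under calling convention \p CC: vector types are conservatively
/// assumed to be (e.g. with -msse-regparm), non-integer scalars are not, and
/// integer types only under X86_VectorCall and X86_FastCall.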
static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
  if (VT.isVector())
    return true; // Assume -msse-regparm might be in effect.
  if (!VT.isInteger())
    return false;
  if (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall)
    return true;
  return false;
}
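
/// Compute, in \p Regs, the registers that would still be used to pass values
/// of type \p VT under the current calling convention, by running the
/// assignment function repeatedly until a value lands in memory. The stack
/// state and the Locs array are restored afterwards, but the returned
/// registers stay marked as allocated.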
void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
                                          MVT VT, CCAssignFn Fn) {
  unsigned SavedStackOffset = StackOffset;
  Align SavedMaxStackArgAlign = MaxStackArgAlign;
  unsigned NumLocs = Locs.size();

  // Set the 'inreg' flag if it is used for this calling convention.
  ISD::ArgFlagsTy Flags;
  if (isValueTypeInRegForCC(CallingConv, VT))
    Flags.setInReg();

  // Allocate something of this value type repeatedly until we get assigned a
  // location in memory.
  bool HaveRegParm = true;
  while (HaveRegParm) {
    if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call has unhandled type " << EVT(VT).getEVTString()
             << " while computing remaining regparms\n";
#endif
      llvm_unreachable(nullptr);
    }
    HaveRegParm = Locs.back().isRegLoc();
  }

  // Copy all the registers from the value locations we added.
  assert(NumLocs < Locs.size() && "CC assignment failed to add location");
  for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
    if (Locs[I].isRegLoc())
      Regs.push_back(MCPhysReg(Locs[I].getLocReg()));

  // Clear the assigned values and stack memory. We leave the registers marked
  // as allocated so that future queries don't return the same registers, e.g.
  // when i64 and f64 are both passed in GPRs.
  StackOffset = SavedStackOffset;
  MaxStackArgAlign = SavedMaxStackArgAlign;
  Locs.resize(NumLocs);
}
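
/// For each register parameter type in \p RegParmTypes, add live-in virtual
/// registers covering every register parameter that has not yet been
/// allocated, so the values arriving in those registers can be forwarded to a
/// musttail call.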
void CCState::analyzeMustTailForwardedRegisters(
    SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
    CCAssignFn Fn) {
  // Calling conventions often do not use register parameters for variadic
  // functions, so analyze as if the call were not variadic in order to see
  // every register a non-variadic call might use.
  SaveAndRestore<bool> SavedVarArg(IsVarArg, false);
  SaveAndRestore<bool> SavedMustTail(AnalyzingMustTailForwardedRegs, true);

  for (MVT RegVT : RegParmTypes) {
    SmallVector<MCPhysReg, 8> RemainingRegs;
    getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
    const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
    const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
    for (MCPhysReg PReg : RemainingRegs) {
      unsigned VReg = MF.addLiveIn(PReg, RC);
      Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
    }
  }
}
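
/// Return true if the call results described by \p Ins would be assigned to
/// exactly the same locations under the callee's and the caller's calling
/// conventions, so a tail call does not need to adjust how results come back.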
bool CCState::resultsCompatible(CallingConv::ID CalleeCC,
                                CallingConv::ID CallerCC, MachineFunction &MF,
                                LLVMContext &C,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn CalleeFn, CCAssignFn CallerFn) {
  if (CalleeCC == CallerCC)
    return true;
  SmallVector<CCValAssign, 4> RVLocs1;
  CCState CCInfo1(CalleeCC, false, MF, RVLocs1, C);
  CCInfo1.AnalyzeCallResult(Ins, CalleeFn);

  SmallVector<CCValAssign, 4> RVLocs2;
  CCState CCInfo2(CallerCC, false, MF, RVLocs2, C);
  CCInfo2.AnalyzeCallResult(Ins, CallerFn);

  if (RVLocs1.size() != RVLocs2.size())
    return false;
  for (unsigned I = 0, E = RVLocs1.size(); I != E; ++I) {
    const CCValAssign &Loc1 = RVLocs1[I];
    const CCValAssign &Loc2 = RVLocs2[I];

    if ( // Must both be in registers, or both in memory
        Loc1.isRegLoc() != Loc2.isRegLoc() ||
        // Must fill the same part of their locations
        Loc1.getLocInfo() != Loc2.getLocInfo() ||
        // Memory offset/register number must be the same
        Loc1.getExtraInfo() != Loc2.getExtraInfo())
      return false;
  }
  return true;
}