//===- SIMachineFunctionInfo.cpp - SI Machine Function Info ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUSubtarget.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include <cassert>
#include <optional>
#include <vector>

enum { MAX_LANES = 64 };

using namespace llvm;

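// Recover the GCNTargetMachine from the subtarget via its TargetLowering.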
static const GCNTargetMachine &getTM(const GCNSubtarget *STI) {
  const SITargetLowering *TLI = STI->getTargetLowering();
  return static_cast<const GCNTargetMachine &>(TLI->getTargetMachine());
}

SIMachineFunctionInfo::SIMachineFunctionInfo(const Function &F,
                                             const GCNSubtarget *STI)
    : AMDGPUMachineFunction(F, *STI), Mode(F, *STI), GWSResourcePSV(getTM(STI)),
      UserSGPRInfo(F, *STI), WorkGroupIDX(false), WorkGroupIDY(false),
      WorkGroupIDZ(false), WorkGroupInfo(false), LDSKernelId(false),
      PrivateSegmentWaveByteOffset(false), WorkItemIDX(false),
      WorkItemIDY(false), WorkItemIDZ(false), ImplicitArgPtr(false),
      GITPtrHigh(0xffffffff), HighBitsOf32BitAddress(0) {
  const GCNSubtarget &ST = *STI;
  FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(F);
  WavesPerEU = ST.getWavesPerEU(F);
  MaxNumWorkGroups = ST.getMaxNumWorkGroups(F);
  assert(MaxNumWorkGroups.size() == 3);

  // Temporarily check both the attribute and the subtarget feature, until the
  // latter is completely removed.
  DynamicVGPRBlockSize = AMDGPU::getDynamicVGPRBlockSize(F);
  if (DynamicVGPRBlockSize == 0 && ST.isDynamicVGPREnabled())
    DynamicVGPRBlockSize = ST.getDynamicVGPRBlockSize();

  Occupancy = ST.computeOccupancy(F, getLDSSize()).second;
  CallingConv::ID CC = F.getCallingConv();

  VRegFlags.reserve(1024);

  const bool IsKernel = CC == CallingConv::AMDGPU_KERNEL ||
                        CC == CallingConv::SPIR_KERNEL;

  if (IsKernel) {
    WorkGroupIDX = true;
    WorkItemIDX = true;
  } else if (CC == CallingConv::AMDGPU_PS) {
    PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
  }

  MayNeedAGPRs = ST.hasMAIInsts();
  if (ST.hasGFX90AInsts() &&
      ST.getMaxNumVGPRs(F) <= AMDGPU::VGPR_32RegClass.getNumRegs() &&
      !mayUseAGPRs(F))
    MayNeedAGPRs = false; // We select all MAI instructions with VGPR operands.

  if (AMDGPU::isChainCC(CC)) {
    // Chain functions don't receive an SP from their caller, but are free to
    // set one up. For now, we can use s32 to match what amdgpu_gfx functions
    // would use if called, but this can be revisited.
    // FIXME: Only reserve this if we actually need it.
    StackPtrOffsetReg = AMDGPU::SGPR32;

    ScratchRSrcReg = AMDGPU::SGPR48_SGPR49_SGPR50_SGPR51;

    ArgInfo.PrivateSegmentBuffer =
        ArgDescriptor::createRegister(ScratchRSrcReg);

    ImplicitArgPtr = false;
  } else if (!isEntryFunction()) {
    if (CC != CallingConv::AMDGPU_Gfx)
      ArgInfo = AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;

    FrameOffsetReg = AMDGPU::SGPR33;
    StackPtrOffsetReg = AMDGPU::SGPR32;

    if (!ST.enableFlatScratch()) {
      // Non-entry functions have no special inputs for now, other than the
      // registers required for scratch access.
      ScratchRSrcReg = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;

      ArgInfo.PrivateSegmentBuffer =
        ArgDescriptor::createRegister(ScratchRSrcReg);
    }

    if (!F.hasFnAttribute("amdgpu-no-implicitarg-ptr"))
      ImplicitArgPtr = true;
  } else {
    ImplicitArgPtr = false;
    MaxKernArgAlign =
        std::max(ST.getAlignmentForImplicitArgPtr(), MaxKernArgAlign);
  }

  if (!AMDGPU::isGraphics(CC) ||
      ((CC == CallingConv::AMDGPU_CS || CC == CallingConv::AMDGPU_Gfx) &&
       ST.hasArchitectedSGPRs())) {
    if (IsKernel || !F.hasFnAttribute("amdgpu-no-workgroup-id-x"))
      WorkGroupIDX = true;

    if (!F.hasFnAttribute("amdgpu-no-workgroup-id-y"))
      WorkGroupIDY = true;

    if (!F.hasFnAttribute("amdgpu-no-workgroup-id-z"))
      WorkGroupIDZ = true;
  }

  if (!AMDGPU::isGraphics(CC)) {
    if (IsKernel || !F.hasFnAttribute("amdgpu-no-workitem-id-x"))
      WorkItemIDX = true;

    if (!F.hasFnAttribute("amdgpu-no-workitem-id-y") &&
        ST.getMaxWorkitemID(F, 1) != 0)
      WorkItemIDY = true;

    if (!F.hasFnAttribute("amdgpu-no-workitem-id-z") &&
        ST.getMaxWorkitemID(F, 2) != 0)
      WorkItemIDZ = true;

    if (!IsKernel && !F.hasFnAttribute("amdgpu-no-lds-kernel-id"))
      LDSKernelId = true;
  }

  if (isEntryFunction()) {
    // X, XY, and XYZ are the only supported combinations, so make sure Y is
    // enabled if Z is.
    if (WorkItemIDZ)
      WorkItemIDY = true;

    if (!ST.flatScratchIsArchitected()) {
      PrivateSegmentWaveByteOffset = true;

      // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
      if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
          (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
        ArgInfo.PrivateSegmentWaveByteOffset =
            ArgDescriptor::createRegister(AMDGPU::SGPR5);
    }
  }

  Attribute A = F.getFnAttribute("amdgpu-git-ptr-high");
  StringRef S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, GITPtrHigh);

  A = F.getFnAttribute("amdgpu-32bit-address-high-bits");
  S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, HighBitsOf32BitAddress);

  MaxMemoryClusterDWords = F.getFnAttributeAsParsedInteger(
      "amdgpu-max-memory-cluster-dwords", DefaultMemoryClusterDWordsLimit);

  // On GFX908, in order to guarantee copying between AGPRs, we need a scratch
  // VGPR available at all times. For now, reserve the highest available VGPR.
  // After RA, shift it to the lowest available unused VGPR if one exists.
  if (ST.hasMAIInsts() && !ST.hasGFX90AInsts()) {
    VGPRForAGPRCopy =
        AMDGPU::VGPR_32RegClass.getRegister(ST.getMaxNumVGPRs(F) - 1);
  }
}

MachineFunctionInfo *SIMachineFunctionInfo::clone(
    BumpPtrAllocator &Allocator, MachineFunction &DestMF,
    const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
    const {
  return DestMF.cloneInfo<SIMachineFunctionInfo>(*this);
}

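// Clamp the tracked occupancy to both the function's waves-per-EU limit and
// the occupancy implied by its workgroup sizes.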
void SIMachineFunctionInfo::limitOccupancy(const MachineFunction &MF) {
  limitOccupancy(getMaxWavesPerEU());
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  limitOccupancy(ST.getOccupancyWithWorkGroupSizes(MF).second);
}

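// Each of the following add* helpers reserves the next free user SGPR(s) for
// one preloaded input, records the choice in ArgInfo, and returns the
// selected register.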
Register SIMachineFunctionInfo::addPrivateSegmentBuffer(
  const SIRegisterInfo &TRI) {
  ArgInfo.PrivateSegmentBuffer =
    ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SGPR_128RegClass));
  NumUserSGPRs += 4;
  return ArgInfo.PrivateSegmentBuffer.getRegister();
}

Register SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
  ArgInfo.DispatchPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.DispatchPtr.getRegister();
}

Register SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) {
  ArgInfo.QueuePtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.QueuePtr.getRegister();
}

Register SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI) {
  ArgInfo.KernargSegmentPtr
    = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.KernargSegmentPtr.getRegister();
}

Register SIMachineFunctionInfo::addDispatchID(const SIRegisterInfo &TRI) {
  ArgInfo.DispatchID = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.DispatchID.getRegister();
}

Register SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) {
  ArgInfo.FlatScratchInit = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.FlatScratchInit.getRegister();
}

Register SIMachineFunctionInfo::addPrivateSegmentSize(const SIRegisterInfo &TRI) {
  ArgInfo.PrivateSegmentSize = ArgDescriptor::createRegister(getNextUserSGPR());
  NumUserSGPRs += 1;
  return ArgInfo.PrivateSegmentSize.getRegister();
}

Register SIMachineFunctionInfo::addImplicitBufferPtr(const SIRegisterInfo &TRI) {
  ArgInfo.ImplicitBufferPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
    getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
  NumUserSGPRs += 2;
  return ArgInfo.ImplicitBufferPtr.getRegister();
}

Register SIMachineFunctionInfo::addLDSKernelId() {
  ArgInfo.LDSKernelId = ArgDescriptor::createRegister(getNextUserSGPR());
  NumUserSGPRs += 1;
  return ArgInfo.LDSKernelId.getRegister();
}

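// Reserve user SGPRs for a kernel argument that the hardware preloads: either
// a single matching register tuple when one lines up with the next free SGPR,
// or a run of individual SGPRs that are merged later.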
SmallVectorImpl<MCRegister> *SIMachineFunctionInfo::addPreloadedKernArg(
    const SIRegisterInfo &TRI, const TargetRegisterClass *RC,
    unsigned AllocSizeDWord, int KernArgIdx, int PaddingSGPRs) {
  auto [It, Inserted] = ArgInfo.PreloadKernArgs.try_emplace(KernArgIdx);
  assert(Inserted && "Preload kernel argument allocated twice.");
  NumUserSGPRs += PaddingSGPRs;
  // If the available register tuples are aligned with the kernarg to be
  // preloaded, use that register; otherwise we need to use a set of SGPRs
  // and merge them.
  if (!ArgInfo.FirstKernArgPreloadReg)
    ArgInfo.FirstKernArgPreloadReg = getNextUserSGPR();
  Register PreloadReg =
      TRI.getMatchingSuperReg(getNextUserSGPR(), AMDGPU::sub0, RC);
  auto &Regs = It->second.Regs;
  if (PreloadReg &&
      (RC == &AMDGPU::SReg_32RegClass || RC == &AMDGPU::SReg_64RegClass)) {
    Regs.push_back(PreloadReg);
    NumUserSGPRs += AllocSizeDWord;
  } else {
    Regs.reserve(AllocSizeDWord);
    for (unsigned I = 0; I < AllocSizeDWord; ++I) {
      Regs.push_back(getNextUserSGPR());
      NumUserSGPRs++;
    }
  }

  // Track the actual number of SGPRs that HW will preload to.
  UserSGPRInfo.allocKernargPreloadSGPRs(AllocSizeDWord + PaddingSGPRs);
  return &Regs;
}

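// Reserve a stack slot for spilling the inactive lanes of a WWM register,
// unless this function provably never has inactive lanes to save.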
void SIMachineFunctionInfo::allocateWWMSpill(MachineFunction &MF, Register VGPR,
                                             uint64_t Size, Align Alignment) {
  // Skip if it is an entry function or the register is already added.
  if (isEntryFunction() || WWMSpills.count(VGPR))
    return;

  // Skip if this is a function with the amdgpu_cs_chain or
  // amdgpu_cs_chain_preserve calling convention and this is a scratch register.
  // We never need to allocate a spill for these because we don't even need to
  // restore the inactive lanes for them (they're scratchier than the usual
  // scratch registers). We only need a spill if there are calls to
  // llvm.amdgcn.cs.chain (otherwise there's no one to save the lanes for,
  // since chain functions do not return) and the function does not contain a
  // call to llvm.amdgcn.init.whole.wave (since in that case there are no
  // inactive lanes when entering the function).
  if (isChainFunction() &&
      (SIRegisterInfo::isChainScratchRegister(VGPR) ||
       !MF.getFrameInfo().hasTailCall() || hasInitWholeWave()))
    return;

  WWMSpills.insert(std::make_pair(
      VGPR, MF.getFrameInfo().CreateSpillStackObject(Size, Alignment)));
}

// Separate out the callee-saved and scratch registers.
void SIMachineFunctionInfo::splitWWMSpillRegisters(
    MachineFunction &MF,
    SmallVectorImpl<std::pair<Register, int>> &CalleeSavedRegs,
    SmallVectorImpl<std::pair<Register, int>> &ScratchRegs) const {
  const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
  for (auto &Reg : WWMSpills) {
    if (isCalleeSavedReg(CSRegs, Reg.first))
      CalleeSavedRegs.push_back(Reg);
    else
      ScratchRegs.push_back(Reg);
  }
}

bool SIMachineFunctionInfo::isCalleeSavedReg(const MCPhysReg *CSRegs,
                                             MCPhysReg Reg) const {
  for (unsigned I = 0; CSRegs[I]; ++I) {
    if (CSRegs[I] == Reg)
      return true;
  }

  return false;
}

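// After register allocation, remap each WWM VGPR to the lowest unused VGPR
// and update all bookkeeping that referred to the old register.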
void SIMachineFunctionInfo::shiftWwmVGPRsToLowestRange(
    MachineFunction &MF, SmallVectorImpl<Register> &WWMVGPRs,
    BitVector &SavedVGPRs) {
  const SIRegisterInfo *TRI = MF.getSubtarget<GCNSubtarget>().getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = WWMVGPRs.size(); I < E; ++I) {
    Register Reg = WWMVGPRs[I];
    Register NewReg =
        TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
    if (!NewReg || NewReg >= Reg)
      break;

    MRI.replaceRegWith(Reg, NewReg);

    // Update various tables with the new VGPR.
    WWMVGPRs[I] = NewReg;
    WWMReservedRegs.remove(Reg);
    WWMReservedRegs.insert(NewReg);
    MRI.reserveReg(NewReg, TRI);

    // Replace the register in SpillPhysVGPRs. This is needed to look for free
    // lanes while spilling special SGPRs like FP, BP, etc. during PEI.
    auto *RegItr = llvm::find(SpillPhysVGPRs, Reg);
    if (RegItr != SpillPhysVGPRs.end()) {
      unsigned Idx = std::distance(SpillPhysVGPRs.begin(), RegItr);
      SpillPhysVGPRs[Idx] = NewReg;
    }

    // The generic `determineCalleeSaves` might have set the old register if it
    // is in the CSR range.
    SavedVGPRs.reset(Reg);

    for (MachineBasicBlock &MBB : MF) {
      MBB.removeLiveIn(Reg);
      MBB.sortUniqueLiveIns();
    }

    Reg = NewReg;
  }
}

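// Lane 0 allocates a fresh virtual VGPR for SGPR spills; subsequent lanes
// reuse the most recently created one.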
bool SIMachineFunctionInfo::allocateVirtualVGPRForSGPRSpills(
    MachineFunction &MF, int FI, unsigned LaneIndex) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LaneVGPR;
  if (!LaneIndex) {
    LaneVGPR = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    SpillVGPRs.push_back(LaneVGPR);
  } else {
    LaneVGPR = SpillVGPRs.back();
  }

  SGPRSpillsToVirtualVGPRLanes[FI].emplace_back(LaneVGPR, LaneIndex);
  return true;
}

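// Physical-VGPR flavour of SGPR spill allocation: the chosen VGPR is reserved
// as a WWM register and made live-in to every block.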
bool SIMachineFunctionInfo::allocatePhysicalVGPRForSGPRSpills(
    MachineFunction &MF, int FI, unsigned LaneIndex, bool IsPrologEpilog) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LaneVGPR;
  if (!LaneIndex) {
    // Find the highest available register if called before RA to ensure the
    // lowest registers are available for allocation. The LaneVGPR, in that
    // case, will be shifted back to the lowest range after VGPR allocation.
    LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF,
                                       !IsPrologEpilog);
    if (LaneVGPR == AMDGPU::NoRegister) {
      // We have no VGPRs left for spilling SGPRs. Reset because we will not
      // partially spill the SGPR to VGPRs.
      SGPRSpillsToPhysicalVGPRLanes.erase(FI);
      return false;
    }

    if (IsPrologEpilog)
      allocateWWMSpill(MF, LaneVGPR);

    reserveWWMRegister(LaneVGPR);
    for (MachineBasicBlock &MBB : MF) {
      MBB.addLiveIn(LaneVGPR);
      MBB.sortUniqueLiveIns();
    }
    SpillPhysVGPRs.push_back(LaneVGPR);
  } else {
    LaneVGPR = SpillPhysVGPRs.back();
  }

  SGPRSpillsToPhysicalVGPRLanes[FI].emplace_back(LaneVGPR, LaneIndex);
  return true;
}

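// Allocate one VGPR lane per dword of the spilled SGPR stack object, backed
// by either virtual or physical VGPRs.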
bool SIMachineFunctionInfo::allocateSGPRSpillToVGPRLane(
    MachineFunction &MF, int FI, bool SpillToPhysVGPRLane,
    bool IsPrologEpilog) {
  std::vector<SIRegisterInfo::SpilledReg> &SpillLanes =
      SpillToPhysVGPRLane ? SGPRSpillsToPhysicalVGPRLanes[FI]
                          : SGPRSpillsToVirtualVGPRLanes[FI];

  // This has already been allocated.
  if (!SpillLanes.empty())
    return true;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  unsigned WaveSize = ST.getWavefrontSize();

  unsigned Size = FrameInfo.getObjectSize(FI);
  unsigned NumLanes = Size / 4;

  if (NumLanes > WaveSize)
    return false;

  assert(Size >= 4 && "invalid sgpr spill size");
  assert(ST.getRegisterInfo()->spillSGPRToVGPR() &&
         "not spilling SGPRs to VGPRs");

  unsigned &NumSpillLanes = SpillToPhysVGPRLane ? NumPhysicalVGPRSpillLanes
                                                : NumVirtualVGPRSpillLanes;

  for (unsigned I = 0; I < NumLanes; ++I, ++NumSpillLanes) {
    unsigned LaneIndex = (NumSpillLanes % WaveSize);

    bool Allocated = SpillToPhysVGPRLane
                         ? allocatePhysicalVGPRForSGPRSpills(MF, FI, LaneIndex,
                                                             IsPrologEpilog)
                         : allocateVirtualVGPRForSGPRSpills(MF, FI, LaneIndex);
    if (!Allocated) {
      NumSpillLanes -= I;
      return false;
    }
  }

  return true;
}

/// Reserve AGPRs or VGPRs to support spilling for FrameIndex \p FI.
/// Either AGPR is spilled to VGPR or vice versa.
/// Returns true if \p FI can be eliminated completely.
bool SIMachineFunctionInfo::allocateVGPRSpillToAGPR(MachineFunction &MF,
                                                    int FI,
                                                    bool isAGPRtoVGPR) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  assert(ST.hasMAIInsts() && FrameInfo.isSpillSlotObjectIndex(FI));

  auto &Spill = VGPRToAGPRSpills[FI];

  // This has already been allocated.
  if (!Spill.Lanes.empty())
    return Spill.FullyAllocated;

  unsigned Size = FrameInfo.getObjectSize(FI);
  unsigned NumLanes = Size / 4;
  Spill.Lanes.resize(NumLanes, AMDGPU::NoRegister);

  const TargetRegisterClass &RC =
      isAGPRtoVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::AGPR_32RegClass;
  auto Regs = RC.getRegisters();

  auto &SpillRegs = isAGPRtoVGPR ? SpillAGPR : SpillVGPR;
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  Spill.FullyAllocated = true;

  // FIXME: Move allocation logic out of MachineFunctionInfo and initialize
  // once.
  BitVector OtherUsedRegs;
  OtherUsedRegs.resize(TRI->getNumRegs());

  const uint32_t *CSRMask =
      TRI->getCallPreservedMask(MF, MF.getFunction().getCallingConv());
  if (CSRMask)
    OtherUsedRegs.setBitsInMask(CSRMask);

  // TODO: Should include register tuples, but doesn't matter with current
  // usage.
  for (MCPhysReg Reg : SpillAGPR)
    OtherUsedRegs.set(Reg);
  for (MCPhysReg Reg : SpillVGPR)
    OtherUsedRegs.set(Reg);

  SmallVectorImpl<MCPhysReg>::const_iterator NextSpillReg = Regs.begin();
  for (int I = NumLanes - 1; I >= 0; --I) {
    NextSpillReg = std::find_if(
        NextSpillReg, Regs.end(), [&MRI, &OtherUsedRegs](MCPhysReg Reg) {
          return MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg) &&
                 !OtherUsedRegs[Reg];
        });

    if (NextSpillReg == Regs.end()) { // Registers exhausted
      Spill.FullyAllocated = false;
      break;
    }

    OtherUsedRegs.set(*NextSpillReg);
    SpillRegs.push_back(*NextSpillReg);
    MRI.reserveReg(*NextSpillReg, TRI);
    Spill.Lanes[I] = *NextSpillReg++;
  }

  return Spill.FullyAllocated;
}

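// Returns true if any remaining SGPR spill slot was moved back to the default
// stack and will therefore be spilled to memory.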
bool SIMachineFunctionInfo::removeDeadFrameIndices(
    MachineFrameInfo &MFI, bool ResetSGPRSpillStackIDs) {
  // Remove dead frame indices from the function frame, but keep FP & BP since
  // spills for them haven't been inserted yet. Also remove the frame indices
  // from the `SGPRSpillsToVirtualVGPRLanes` data structure; otherwise a later
  // pass such as "stack slot coloring" could re-map the freed frame indices
  // and cause unexpected side effects.
  for (auto &R : make_early_inc_range(SGPRSpillsToVirtualVGPRLanes)) {
    MFI.RemoveStackObject(R.first);
    SGPRSpillsToVirtualVGPRLanes.erase(R.first);
  }

  // Remove the dead frame indices of CSR SGPRs which are spilled to physical
  // VGPR lanes during the SILowerSGPRSpills pass.
  if (!ResetSGPRSpillStackIDs) {
    for (auto &R : make_early_inc_range(SGPRSpillsToPhysicalVGPRLanes)) {
      MFI.RemoveStackObject(R.first);
      SGPRSpillsToPhysicalVGPRLanes.erase(R.first);
    }
  }
  bool HaveSGPRToMemory = false;

  if (ResetSGPRSpillStackIDs) {
    // All other SGPRs must be allocated on the default stack, so reset the
    // stack ID.
    for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd(); I != E;
         ++I) {
      if (!checkIndexInPrologEpilogSGPRSpills(I)) {
        if (MFI.getStackID(I) == TargetStackID::SGPRSpill) {
          MFI.setStackID(I, TargetStackID::Default);
          HaveSGPRToMemory = true;
        }
      }
    }
  }

  for (auto &R : VGPRToAGPRSpills) {
    if (R.second.IsDead)
      MFI.RemoveStackObject(R.first);
  }

  return HaveSGPRToMemory;
}

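// Lazily create (and cache) the emergency stack slot used by the register
// scavenger.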
int SIMachineFunctionInfo::getScavengeFI(MachineFrameInfo &MFI,
                                         const SIRegisterInfo &TRI) {
  if (ScavengeFI)
    return *ScavengeFI;

  ScavengeFI =
      MFI.CreateStackObject(TRI.getSpillSize(AMDGPU::SGPR_32RegClass),
                            TRI.getSpillAlign(AMDGPU::SGPR_32RegClass), false);
  return *ScavengeFI;
}

MCPhysReg SIMachineFunctionInfo::getNextUserSGPR() const {
  assert(NumSystemSGPRs == 0 && "System SGPRs must be added after user SGPRs");
  return AMDGPU::SGPR0 + NumUserSGPRs;
}

MCPhysReg SIMachineFunctionInfo::getNextSystemSGPR() const {
  return AMDGPU::SGPR0 + NumUserSGPRs + NumSystemSGPRs;
}

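// MachineRegisterInfo delegate callbacks: keep the per-vreg flag table in
// sync as virtual registers are created or cloned.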
void SIMachineFunctionInfo::MRI_NoteNewVirtualRegister(Register Reg) {
  VRegFlags.grow(Reg);
}

void SIMachineFunctionInfo::MRI_NoteCloneVirtualRegister(Register NewReg,
                                                         Register SrcReg) {
  VRegFlags.grow(NewReg);
  VRegFlags[NewReg] = VRegFlags[SrcReg];
}

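// Return the SGPR carrying the low half of the GIT (global information table)
// pointer on amdpal; merged shaders on gfx9+ receive it in s8 instead of s0.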
Register
SIMachineFunctionInfo::getGITPtrLoReg(const MachineFunction &MF) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (!ST.isAmdPalOS())
    return Register();
  Register GitPtrLo = AMDGPU::SGPR0; // Low GIT address passed in
  if (ST.hasMergedShaders()) {
    switch (MF.getFunction().getCallingConv()) {
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_GS:
      // Low GIT address is passed in s8 rather than s0 for an LS+HS or
      // ES+GS merged shader on gfx9+.
      GitPtrLo = AMDGPU::SGPR8;
      return GitPtrLo;
    default:
      return GitPtrLo;
    }
  }
  return GitPtrLo;
}

static yaml::StringValue regToString(Register Reg,
                                     const TargetRegisterInfo &TRI) {
  yaml::StringValue Dest;
  {
    raw_string_ostream OS(Dest.Value);
    OS << printReg(Reg, &TRI);
  }
  return Dest;
}

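// Convert the in-memory argument descriptors to their MIR YAML form; returns
// std::nullopt when no argument is set so the field is omitted entirely.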
static std::optional<yaml::SIArgumentInfo>
convertArgumentInfo(const AMDGPUFunctionArgInfo &ArgInfo,
                    const TargetRegisterInfo &TRI) {
  yaml::SIArgumentInfo AI;

  auto convertArg = [&](std::optional<yaml::SIArgument> &A,
                        const ArgDescriptor &Arg) {
    if (!Arg)
      return false;

    // Create a register or stack argument.
    yaml::SIArgument SA = yaml::SIArgument::createArgument(Arg.isRegister());
    if (Arg.isRegister()) {
      raw_string_ostream OS(SA.RegisterName.Value);
      OS << printReg(Arg.getRegister(), &TRI);
    } else
      SA.StackOffset = Arg.getStackOffset();
    // Check and update the optional mask.
    if (Arg.isMasked())
      SA.Mask = Arg.getMask();

    A = SA;
    return true;
  };

  // TODO: Need to serialize kernarg preloads.
  bool Any = false;
  Any |= convertArg(AI.PrivateSegmentBuffer, ArgInfo.PrivateSegmentBuffer);
  Any |= convertArg(AI.DispatchPtr, ArgInfo.DispatchPtr);
  Any |= convertArg(AI.QueuePtr, ArgInfo.QueuePtr);
  Any |= convertArg(AI.KernargSegmentPtr, ArgInfo.KernargSegmentPtr);
  Any |= convertArg(AI.DispatchID, ArgInfo.DispatchID);
  Any |= convertArg(AI.FlatScratchInit, ArgInfo.FlatScratchInit);
  Any |= convertArg(AI.LDSKernelId, ArgInfo.LDSKernelId);
  Any |= convertArg(AI.PrivateSegmentSize, ArgInfo.PrivateSegmentSize);
  Any |= convertArg(AI.WorkGroupIDX, ArgInfo.WorkGroupIDX);
  Any |= convertArg(AI.WorkGroupIDY, ArgInfo.WorkGroupIDY);
  Any |= convertArg(AI.WorkGroupIDZ, ArgInfo.WorkGroupIDZ);
  Any |= convertArg(AI.WorkGroupInfo, ArgInfo.WorkGroupInfo);
  Any |= convertArg(AI.PrivateSegmentWaveByteOffset,
                    ArgInfo.PrivateSegmentWaveByteOffset);
  Any |= convertArg(AI.ImplicitArgPtr, ArgInfo.ImplicitArgPtr);
  Any |= convertArg(AI.ImplicitBufferPtr, ArgInfo.ImplicitBufferPtr);
  Any |= convertArg(AI.WorkItemIDX, ArgInfo.WorkItemIDX);
  Any |= convertArg(AI.WorkItemIDY, ArgInfo.WorkItemIDY);
  Any |= convertArg(AI.WorkItemIDZ, ArgInfo.WorkItemIDZ);

  if (Any)
    return AI;

  return std::nullopt;
}

yaml::SIMachineFunctionInfo::SIMachineFunctionInfo(
    const llvm::SIMachineFunctionInfo &MFI, const TargetRegisterInfo &TRI,
    const llvm::MachineFunction &MF)
    : ExplicitKernArgSize(MFI.getExplicitKernArgSize()),
      MaxKernArgAlign(MFI.getMaxKernArgAlign()), LDSSize(MFI.getLDSSize()),
      GDSSize(MFI.getGDSSize()), DynLDSAlign(MFI.getDynLDSAlign()),
      IsEntryFunction(MFI.isEntryFunction()),
      NoSignedZerosFPMath(MFI.hasNoSignedZerosFPMath()),
      MemoryBound(MFI.isMemoryBound()), WaveLimiter(MFI.needsWaveLimiter()),
      HasSpilledSGPRs(MFI.hasSpilledSGPRs()),
      HasSpilledVGPRs(MFI.hasSpilledVGPRs()),
      HighBitsOf32BitAddress(MFI.get32BitAddressHighBits()),
      Occupancy(MFI.getOccupancy()),
      ScratchRSrcReg(regToString(MFI.getScratchRSrcReg(), TRI)),
      FrameOffsetReg(regToString(MFI.getFrameOffsetReg(), TRI)),
      StackPtrOffsetReg(regToString(MFI.getStackPtrOffsetReg(), TRI)),
      BytesInStackArgArea(MFI.getBytesInStackArgArea()),
      ReturnsVoid(MFI.returnsVoid()),
      ArgInfo(convertArgumentInfo(MFI.getArgInfo(), TRI)),
      PSInputAddr(MFI.getPSInputAddr()), PSInputEnable(MFI.getPSInputEnable()),
      MaxMemoryClusterDWords(MFI.getMaxMemoryClusterDWords()),
      Mode(MFI.getMode()), HasInitWholeWave(MFI.hasInitWholeWave()),
      DynamicVGPRBlockSize(MFI.getDynamicVGPRBlockSize()),
      ScratchReservedForDynamicVGPRs(MFI.getScratchReservedForDynamicVGPRs()) {
  for (Register Reg : MFI.getSGPRSpillPhysVGPRs())
    SpillPhysVGPRS.push_back(regToString(Reg, TRI));

  for (Register Reg : MFI.getWWMReservedRegs())
    WWMReservedRegs.push_back(regToString(Reg, TRI));

  if (MFI.getLongBranchReservedReg())
    LongBranchReservedReg = regToString(MFI.getLongBranchReservedReg(), TRI);
  if (MFI.getVGPRForAGPRCopy())
    VGPRForAGPRCopy = regToString(MFI.getVGPRForAGPRCopy(), TRI);

  if (MFI.getSGPRForEXECCopy())
    SGPRForEXECCopy = regToString(MFI.getSGPRForEXECCopy(), TRI);

  auto SFI = MFI.getOptionalScavengeFI();
  if (SFI)
    ScavengeFI = yaml::FrameIndex(*SFI, MF.getFrameInfo());
}

void yaml::SIMachineFunctionInfo::mappingImpl(yaml::IO &YamlIO) {
  MappingTraits<SIMachineFunctionInfo>::mapping(YamlIO, *this);
}

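// Restore the base fields from parsed MIR YAML. Returns true (and fills in
// Error/SourceRange) if the serialized scavenge frame index is invalid.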
bool SIMachineFunctionInfo::initializeBaseYamlFields(
    const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF,
    PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) {
  ExplicitKernArgSize = YamlMFI.ExplicitKernArgSize;
  MaxKernArgAlign = YamlMFI.MaxKernArgAlign;
  LDSSize = YamlMFI.LDSSize;
  GDSSize = YamlMFI.GDSSize;
  DynLDSAlign = YamlMFI.DynLDSAlign;
  PSInputAddr = YamlMFI.PSInputAddr;
  PSInputEnable = YamlMFI.PSInputEnable;
  MaxMemoryClusterDWords = YamlMFI.MaxMemoryClusterDWords;
  HighBitsOf32BitAddress = YamlMFI.HighBitsOf32BitAddress;
  Occupancy = YamlMFI.Occupancy;
  IsEntryFunction = YamlMFI.IsEntryFunction;
  NoSignedZerosFPMath = YamlMFI.NoSignedZerosFPMath;
  MemoryBound = YamlMFI.MemoryBound;
  WaveLimiter = YamlMFI.WaveLimiter;
  HasSpilledSGPRs = YamlMFI.HasSpilledSGPRs;
  HasSpilledVGPRs = YamlMFI.HasSpilledVGPRs;
  BytesInStackArgArea = YamlMFI.BytesInStackArgArea;
  ReturnsVoid = YamlMFI.ReturnsVoid;

  if (YamlMFI.ScavengeFI) {
    auto FIOrErr = YamlMFI.ScavengeFI->getFI(MF.getFrameInfo());
    if (!FIOrErr) {
      // Create a diagnostic for the frame index.
      const MemoryBuffer &Buffer =
          *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());

      Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1, 1,
                           SourceMgr::DK_Error, toString(FIOrErr.takeError()),
                           "", {}, {});
      SourceRange = YamlMFI.ScavengeFI->SourceRange;
      return true;
    }
    ScavengeFI = *FIOrErr;
  } else {
    ScavengeFI = std::nullopt;
  }
  return false;
}

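// The first value of the "amdgpu-agpr-alloc" attribute is the minimum number
// of AGPRs to allocate; an explicit zero means the function never uses AGPRs.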
bool SIMachineFunctionInfo::mayUseAGPRs(const Function &F) const {
  auto [MinNumAGPR, MaxNumAGPR] =
      AMDGPU::getIntegerPairAttribute(F, "amdgpu-agpr-alloc", {~0u, ~0u},
                                      /*OnlyFirstRequired=*/true);
  return MinNumAGPR != 0u;
}