//===- SIMachineFunctionInfo.cpp - SI Machine Function Info ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SIMachineFunctionInfo.h"
#include "AMDGPUTargetMachine.h"
#include "AMDGPUSubtarget.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include <cassert>
#include <vector>

#define MAX_LANES 64

using namespace llvm;

// Initialize the per-function state from the IR function: decide which ABI
// inputs (dispatch/queue/kernarg pointers, workgroup and work-item IDs, flat
// scratch init, ...) this function will need, pick the stack/frame registers
// for non-entry functions, and read attribute-driven overrides such as
// "amdgpu-git-ptr-high", "amdgpu-32bit-address-high-bits" and
// "amdgpu-gds-size".
SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
  : AMDGPUMachineFunction(MF),
    PrivateSegmentBuffer(false),
    DispatchPtr(false),
    QueuePtr(false),
    KernargSegmentPtr(false),
    DispatchID(false),
    FlatScratchInit(false),
    WorkGroupIDX(false),
    WorkGroupIDY(false),
    WorkGroupIDZ(false),
    WorkGroupInfo(false),
    PrivateSegmentWaveByteOffset(false),
    WorkItemIDX(false),
    WorkItemIDY(false),
    WorkItemIDZ(false),
    ImplicitBufferPtr(false),
    ImplicitArgPtr(false),
    GITPtrHigh(0xffffffff),
    HighBitsOf32BitAddress(0),
    GDSSize(0) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const Function &F = MF.getFunction();
  FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(F);
  WavesPerEU = ST.getWavesPerEU(F);

  Occupancy = ST.computeOccupancy(F, getLDSSize());
  CallingConv::ID CC = F.getCallingConv();

  // FIXME: Should have analysis or something rather than attribute to detect
  // calls.
  const bool HasCalls = F.hasFnAttribute("amdgpu-calls");

  const bool IsKernel = CC == CallingConv::AMDGPU_KERNEL ||
                        CC == CallingConv::SPIR_KERNEL;

  if (IsKernel) {
    // The kernarg segment pointer is only needed when there is something to
    // pass: explicit arguments or implicit (hidden) argument bytes.
    if (!F.arg_empty() || ST.getImplicitArgNumBytes(F) != 0)
      KernargSegmentPtr = true;
    WorkGroupIDX = true;
    WorkItemIDX = true;
  } else if (CC == CallingConv::AMDGPU_PS) {
    PSInputAddr = AMDGPU::getInitialPSInputAddr(F);
  }

  if (!isEntryFunction()) {
    if (CC != CallingConv::AMDGPU_Gfx)
      ArgInfo = AMDGPUArgumentUsageInfo::FixedABIFunctionInfo;

    // TODO: Pick a high register, and shift down, similar to a kernel.
    FrameOffsetReg = AMDGPU::SGPR33;
    StackPtrOffsetReg = AMDGPU::SGPR32;

    if (!ST.enableFlatScratch()) {
      // Non-entry functions have no special inputs for now, other registers
      // required for scratch access.
      ScratchRSrcReg = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;

      ArgInfo.PrivateSegmentBuffer =
        ArgDescriptor::createRegister(ScratchRSrcReg);
    }

    if (!F.hasFnAttribute("amdgpu-no-implicitarg-ptr"))
      ImplicitArgPtr = true;
  } else {
    ImplicitArgPtr = false;
    MaxKernArgAlign = std::max(ST.getAlignmentForImplicitArgPtr(),
                               MaxKernArgAlign);
  }

  bool isAmdHsaOrMesa = ST.isAmdHsaOrMesa(F);
  if (isAmdHsaOrMesa && !ST.enableFlatScratch())
    PrivateSegmentBuffer = true;
  else if (ST.isMesaGfxShader(F))
    ImplicitBufferPtr = true;

  // For compute, each input defaults to enabled unless the corresponding
  // "amdgpu-no-*" attribute proves it unused; kernels always get X IDs.
  if (!AMDGPU::isGraphics(CC)) {
    if (IsKernel || !F.hasFnAttribute("amdgpu-no-workgroup-id-x"))
      WorkGroupIDX = true;

    if (!F.hasFnAttribute("amdgpu-no-workgroup-id-y"))
      WorkGroupIDY = true;

    if (!F.hasFnAttribute("amdgpu-no-workgroup-id-z"))
      WorkGroupIDZ = true;

    if (IsKernel || !F.hasFnAttribute("amdgpu-no-workitem-id-x"))
      WorkItemIDX = true;

    // Y/Z work-item IDs are also skipped when the work-group size proves the
    // dimension is degenerate (max ID of 0).
    if (!F.hasFnAttribute("amdgpu-no-workitem-id-y") &&
        ST.getMaxWorkitemID(F, 1) != 0)
      WorkItemIDY = true;

    if (!F.hasFnAttribute("amdgpu-no-workitem-id-z") &&
        ST.getMaxWorkitemID(F, 2) != 0)
      WorkItemIDZ = true;

    if (!F.hasFnAttribute("amdgpu-no-dispatch-ptr"))
      DispatchPtr = true;

    if (!F.hasFnAttribute("amdgpu-no-queue-ptr"))
      QueuePtr = true;

    if (!F.hasFnAttribute("amdgpu-no-dispatch-id"))
      DispatchID = true;
  }

  // FIXME: This attribute is a hack, we just need an analysis on the function
  // to look for allocas.
  bool HasStackObjects = F.hasFnAttribute("amdgpu-stack-objects");

  // TODO: This could be refined a lot. The attribute is a poor way of
  // detecting calls or stack objects that may require it before argument
  // lowering.
  if (ST.hasFlatAddressSpace() && isEntryFunction() &&
      (isAmdHsaOrMesa || ST.enableFlatScratch()) &&
      (HasCalls || HasStackObjects || ST.enableFlatScratch()) &&
      !ST.flatScratchIsArchitected()) {
    FlatScratchInit = true;
  }

  if (isEntryFunction()) {
    // X, XY, and XYZ are the only supported combinations, so make sure Y is
    // enabled if Z is.
    if (WorkItemIDZ)
      WorkItemIDY = true;

    if (!ST.flatScratchIsArchitected()) {
      PrivateSegmentWaveByteOffset = true;

      // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
      if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
          (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
        ArgInfo.PrivateSegmentWaveByteOffset =
            ArgDescriptor::createRegister(AMDGPU::SGPR5);
    }
  }

  // Attribute values below are parsed as integers; an absent/empty attribute
  // leaves the member at its default from the initializer list.
  Attribute A = F.getFnAttribute("amdgpu-git-ptr-high");
  StringRef S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, GITPtrHigh);

  A = F.getFnAttribute("amdgpu-32bit-address-high-bits");
  S = A.getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, HighBitsOf32BitAddress);

  S = F.getFnAttribute("amdgpu-gds-size").getValueAsString();
  if (!S.empty())
    S.consumeInteger(0, GDSSize);
}

// Clamp the occupancy estimate by both the waves-per-EU limit and the
// occupancy achievable given this function's LDS usage.
void SIMachineFunctionInfo::limitOccupancy(const MachineFunction &MF) {
  limitOccupancy(getMaxWavesPerEU());
  const GCNSubtarget& ST = MF.getSubtarget<GCNSubtarget>();
  limitOccupancy(ST.getOccupancyWithLocalMemSize(getLDSSize(),
                 MF.getFunction()));
}
1925ffd83dbSDimitry Andric Register SIMachineFunctionInfo::addPrivateSegmentBuffer( 1930b57cec5SDimitry Andric const SIRegisterInfo &TRI) { 1940b57cec5SDimitry Andric ArgInfo.PrivateSegmentBuffer = 1950b57cec5SDimitry Andric ArgDescriptor::createRegister(TRI.getMatchingSuperReg( 1968bcb0991SDimitry Andric getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SGPR_128RegClass)); 1970b57cec5SDimitry Andric NumUserSGPRs += 4; 1980b57cec5SDimitry Andric return ArgInfo.PrivateSegmentBuffer.getRegister(); 1990b57cec5SDimitry Andric } 2000b57cec5SDimitry Andric 2015ffd83dbSDimitry Andric Register SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) { 2020b57cec5SDimitry Andric ArgInfo.DispatchPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg( 2030b57cec5SDimitry Andric getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass)); 2040b57cec5SDimitry Andric NumUserSGPRs += 2; 2050b57cec5SDimitry Andric return ArgInfo.DispatchPtr.getRegister(); 2060b57cec5SDimitry Andric } 2070b57cec5SDimitry Andric 2085ffd83dbSDimitry Andric Register SIMachineFunctionInfo::addQueuePtr(const SIRegisterInfo &TRI) { 2090b57cec5SDimitry Andric ArgInfo.QueuePtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg( 2100b57cec5SDimitry Andric getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass)); 2110b57cec5SDimitry Andric NumUserSGPRs += 2; 2120b57cec5SDimitry Andric return ArgInfo.QueuePtr.getRegister(); 2130b57cec5SDimitry Andric } 2140b57cec5SDimitry Andric 2155ffd83dbSDimitry Andric Register SIMachineFunctionInfo::addKernargSegmentPtr(const SIRegisterInfo &TRI) { 2160b57cec5SDimitry Andric ArgInfo.KernargSegmentPtr 2170b57cec5SDimitry Andric = ArgDescriptor::createRegister(TRI.getMatchingSuperReg( 2180b57cec5SDimitry Andric getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass)); 2190b57cec5SDimitry Andric NumUserSGPRs += 2; 2200b57cec5SDimitry Andric return ArgInfo.KernargSegmentPtr.getRegister(); 2210b57cec5SDimitry Andric } 2220b57cec5SDimitry Andric 
2235ffd83dbSDimitry Andric Register SIMachineFunctionInfo::addDispatchID(const SIRegisterInfo &TRI) { 2240b57cec5SDimitry Andric ArgInfo.DispatchID = ArgDescriptor::createRegister(TRI.getMatchingSuperReg( 2250b57cec5SDimitry Andric getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass)); 2260b57cec5SDimitry Andric NumUserSGPRs += 2; 2270b57cec5SDimitry Andric return ArgInfo.DispatchID.getRegister(); 2280b57cec5SDimitry Andric } 2290b57cec5SDimitry Andric 2305ffd83dbSDimitry Andric Register SIMachineFunctionInfo::addFlatScratchInit(const SIRegisterInfo &TRI) { 2310b57cec5SDimitry Andric ArgInfo.FlatScratchInit = ArgDescriptor::createRegister(TRI.getMatchingSuperReg( 2320b57cec5SDimitry Andric getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass)); 2330b57cec5SDimitry Andric NumUserSGPRs += 2; 2340b57cec5SDimitry Andric return ArgInfo.FlatScratchInit.getRegister(); 2350b57cec5SDimitry Andric } 2360b57cec5SDimitry Andric 2375ffd83dbSDimitry Andric Register SIMachineFunctionInfo::addImplicitBufferPtr(const SIRegisterInfo &TRI) { 2380b57cec5SDimitry Andric ArgInfo.ImplicitBufferPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg( 2390b57cec5SDimitry Andric getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass)); 2400b57cec5SDimitry Andric NumUserSGPRs += 2; 2410b57cec5SDimitry Andric return ArgInfo.ImplicitBufferPtr.getRegister(); 2420b57cec5SDimitry Andric } 2430b57cec5SDimitry Andric 2445ffd83dbSDimitry Andric bool SIMachineFunctionInfo::isCalleeSavedReg(const MCPhysReg *CSRegs, 2455ffd83dbSDimitry Andric MCPhysReg Reg) { 2460b57cec5SDimitry Andric for (unsigned I = 0; CSRegs[I]; ++I) { 2470b57cec5SDimitry Andric if (CSRegs[I] == Reg) 2480b57cec5SDimitry Andric return true; 2490b57cec5SDimitry Andric } 2500b57cec5SDimitry Andric 2510b57cec5SDimitry Andric return false; 2520b57cec5SDimitry Andric } 2530b57cec5SDimitry Andric 2540b57cec5SDimitry Andric /// \p returns true if \p NumLanes slots are available in VGPRs already used for 
/// SGPR spilling.
//
// FIXME: This only works after processFunctionBeforeFrameFinalized
bool SIMachineFunctionInfo::haveFreeLanesForSGPRSpill(const MachineFunction &MF,
                                                      unsigned NumNeed) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  unsigned WaveSize = ST.getWavefrontSize();
  // Each reserved VGPR provides WaveSize lanes; compare against lanes in use.
  return NumVGPRSpillLanes + NumNeed <= WaveSize * SpillVGPRs.size();
}

/// Reserve a slice of a VGPR to support spilling for FrameIndex \p FI.
/// Returns false (and rolls back any partial allocation) if there are not
/// enough free VGPR lanes to hold the whole object.
bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
                                                    int FI) {
  std::vector<SpilledReg> &SpillLanes = SGPRToVGPRSpills[FI];

  // This has already been allocated.
  if (!SpillLanes.empty())
    return true;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned WaveSize = ST.getWavefrontSize();

  // One 32-bit lane per 4 bytes of the spilled object.
  unsigned Size = FrameInfo.getObjectSize(FI);
  unsigned NumLanes = Size / 4;

  if (NumLanes > WaveSize)
    return false;

  assert(Size >= 4 && "invalid sgpr spill size");
  assert(TRI->spillSGPRToVGPR() && "not spilling SGPRs to VGPRs");

  // Make sure to handle the case where a wide SGPR spill may span between two
  // VGPRs.
  for (unsigned I = 0; I < NumLanes; ++I, ++NumVGPRSpillLanes) {
    Register LaneVGPR;
    unsigned VGPRIndex = (NumVGPRSpillLanes % WaveSize);

    // Lane index 0 means the previous VGPR (if any) is full: grab a new one.
    if (VGPRIndex == 0) {
      LaneVGPR = TRI->findUnusedRegister(MRI, &AMDGPU::VGPR_32RegClass, MF);
      if (LaneVGPR == AMDGPU::NoRegister) {
        // We have no VGPRs left for spilling SGPRs. Reset because we will not
        // partially spill the SGPR to VGPRs.
        SGPRToVGPRSpills.erase(FI);
        NumVGPRSpillLanes -= I;

        // FIXME: We can run out of free registers with split allocation if
        // IPRA is enabled and a called function already uses every VGPR.
#if 0
        DiagnosticInfoResourceLimit DiagOutOfRegs(MF.getFunction(),
                                                  "VGPRs for SGPR spilling",
                                                  0, DS_Error);
        MF.getFunction().getContext().diagnose(DiagOutOfRegs);
#endif
        return false;
      }

      Optional<int> SpillFI;
      // We need to preserve inactive lanes, so always save, even caller-save
      // registers.
      if (!isEntryFunction()) {
        SpillFI = FrameInfo.CreateSpillStackObject(4, Align(4));
      }

      SpillVGPRs.push_back(SGPRSpillVGPR(LaneVGPR, SpillFI));

      // Add this register as live-in to all blocks to avoid machine verifier
      // complaining about use of an undefined physical register.
      for (MachineBasicBlock &BB : MF)
        BB.addLiveIn(LaneVGPR);
    } else {
      // Continue filling lanes of the most recently reserved VGPR.
      LaneVGPR = SpillVGPRs.back().VGPR;
    }

    SpillLanes.push_back(SpilledReg(LaneVGPR, VGPRIndex));
  }

  return true;
}

/// Reserve AGPRs or VGPRs to support spilling for FrameIndex \p FI.
/// Either AGPR is spilled to VGPR or vice versa.
/// Returns true if a \p FI can be eliminated completely.
bool SIMachineFunctionInfo::allocateVGPRSpillToAGPR(MachineFunction &MF,
                                                    int FI,
                                                    bool isAGPRtoVGPR) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  assert(ST.hasMAIInsts() && FrameInfo.isSpillSlotObjectIndex(FI));

  auto &Spill = VGPRToAGPRSpills[FI];

  // This has already been allocated.
  if (!Spill.Lanes.empty())
    return Spill.FullyAllocated;

  unsigned Size = FrameInfo.getObjectSize(FI);
  unsigned NumLanes = Size / 4;
  Spill.Lanes.resize(NumLanes, AMDGPU::NoRegister);

  // Spill into the opposite register file of the source.
  const TargetRegisterClass &RC =
      isAGPRtoVGPR ?
          AMDGPU::VGPR_32RegClass : AMDGPU::AGPR_32RegClass;
  auto Regs = RC.getRegisters();

  auto &SpillRegs = isAGPRtoVGPR ? SpillAGPR : SpillVGPR;
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  Spill.FullyAllocated = true;

  // FIXME: Move allocation logic out of MachineFunctionInfo and initialize
  // once.
  BitVector OtherUsedRegs;
  OtherUsedRegs.resize(TRI->getNumRegs());

  // Avoid callee-saved registers of the current calling convention.
  const uint32_t *CSRMask =
      TRI->getCallPreservedMask(MF, MF.getFunction().getCallingConv());
  if (CSRMask)
    OtherUsedRegs.setBitsInMask(CSRMask);

  // TODO: Should include register tuples, but doesn't matter with current
  // usage.
  for (MCPhysReg Reg : SpillAGPR)
    OtherUsedRegs.set(Reg);
  for (MCPhysReg Reg : SpillVGPR)
    OtherUsedRegs.set(Reg);

  // Greedily assign one free, unused register per lane, scanning the class's
  // register list once from the front (highest lane index first).
  SmallVectorImpl<MCPhysReg>::const_iterator NextSpillReg = Regs.begin();
  for (int I = NumLanes - 1; I >= 0; --I) {
    NextSpillReg = std::find_if(
        NextSpillReg, Regs.end(), [&MRI, &OtherUsedRegs](MCPhysReg Reg) {
          return MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg) &&
                 !OtherUsedRegs[Reg];
        });

    if (NextSpillReg == Regs.end()) { // Registers exhausted
      Spill.FullyAllocated = false;
      break;
    }

    OtherUsedRegs.set(*NextSpillReg);
    SpillRegs.push_back(*NextSpillReg);
    Spill.Lanes[I] = *NextSpillReg++;
  }

  return Spill.FullyAllocated;
}

void SIMachineFunctionInfo::removeDeadFrameIndices(MachineFrameInfo &MFI) {
  // Remove dead frame indices from function frame, however keep FP & BP since
  // spills for them haven't been inserted yet. And also make sure to remove the
  // frame indices from `SGPRToVGPRSpills` data structure, otherwise, it could
  // result in an unexpected side effect and bug, in case of any re-mapping of
  // freed frame indices by later pass(es) like "stack slot coloring".
  for (auto &R : make_early_inc_range(SGPRToVGPRSpills)) {
    if (R.first != FramePointerSaveIndex && R.first != BasePointerSaveIndex) {
      MFI.RemoveStackObject(R.first);
      SGPRToVGPRSpills.erase(R.first);
    }
  }

  // All other SGPRs must be allocated on the default stack, so reset the stack
  // ID.
  for (int i = MFI.getObjectIndexBegin(), e = MFI.getObjectIndexEnd(); i != e;
       ++i)
    if (i != FramePointerSaveIndex && i != BasePointerSaveIndex)
      MFI.setStackID(i, TargetStackID::Default);

  for (auto &R : VGPRToAGPRSpills) {
    if (R.second.IsDead)
      MFI.RemoveStackObject(R.first);
  }
}

// Lazily create and cache the frame index used by the register scavenger's
// emergency spill slot. Entry functions use a fixed object at offset 0;
// other functions get a normal stack object.
int SIMachineFunctionInfo::getScavengeFI(MachineFrameInfo &MFI,
                                         const SIRegisterInfo &TRI) {
  if (ScavengeFI)
    return *ScavengeFI;
  if (isEntryFunction()) {
    ScavengeFI = MFI.CreateFixedObject(
        TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
  } else {
    ScavengeFI = MFI.CreateStackObject(
        TRI.getSpillSize(AMDGPU::SGPR_32RegClass),
        TRI.getSpillAlign(AMDGPU::SGPR_32RegClass), false);
  }
  return *ScavengeFI;
}

// User SGPRs are allocated densely starting at SGPR0.
MCPhysReg SIMachineFunctionInfo::getNextUserSGPR()
const {
  assert(NumSystemSGPRs == 0 && "System SGPRs must be added after user SGPRs");
  return AMDGPU::SGPR0 + NumUserSGPRs;
}

// System SGPRs follow immediately after all user SGPRs.
MCPhysReg SIMachineFunctionInfo::getNextSystemSGPR() const {
  return AMDGPU::SGPR0 + NumUserSGPRs + NumSystemSGPRs;
}

// Return the SGPR carrying the low half of the GIT (global information
// table) address on amdpal, or an invalid Register on other OSes.
Register
SIMachineFunctionInfo::getGITPtrLoReg(const MachineFunction &MF) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (!ST.isAmdPalOS())
    return Register();
  Register GitPtrLo = AMDGPU::SGPR0; // Low GIT address passed in
  if (ST.hasMergedShaders()) {
    switch (MF.getFunction().getCallingConv()) {
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_GS:
      // Low GIT address is passed in s8 rather than s0 for an LS+HS or
      // ES+GS merged shader on gfx9+.
      GitPtrLo = AMDGPU::SGPR8;
      return GitPtrLo;
    default:
      return GitPtrLo;
    }
  }
  return GitPtrLo;
}

// Render a register as its printable name for YAML serialization.
static yaml::StringValue regToString(Register Reg,
                                     const TargetRegisterInfo &TRI) {
  yaml::StringValue Dest;
  {
    raw_string_ostream OS(Dest.Value);
    OS << printReg(Reg, &TRI);
  }
  return Dest;
}

// Convert the in-memory argument info to its YAML form. Returns None when no
// argument descriptor is set at all, so the field is omitted from the output.
static Optional<yaml::SIArgumentInfo>
convertArgumentInfo(const AMDGPUFunctionArgInfo &ArgInfo,
                    const TargetRegisterInfo &TRI) {
  yaml::SIArgumentInfo AI;

  // Convert one descriptor; returns true if it was set (so the caller knows
  // whether anything was emitted at all).
  auto convertArg = [&](Optional<yaml::SIArgument> &A,
                        const ArgDescriptor &Arg) {
    if (!Arg)
      return false;

    // Create a register or stack argument.
    yaml::SIArgument SA = yaml::SIArgument::createArgument(Arg.isRegister());
    if (Arg.isRegister()) {
      raw_string_ostream OS(SA.RegisterName.Value);
      OS << printReg(Arg.getRegister(), &TRI);
    } else
      SA.StackOffset = Arg.getStackOffset();
    // Check and update the optional mask.
    if (Arg.isMasked())
      SA.Mask = Arg.getMask();

    A = SA;
    return true;
  };

  bool Any = false;
  Any |= convertArg(AI.PrivateSegmentBuffer, ArgInfo.PrivateSegmentBuffer);
  Any |= convertArg(AI.DispatchPtr, ArgInfo.DispatchPtr);
  Any |= convertArg(AI.QueuePtr, ArgInfo.QueuePtr);
  Any |= convertArg(AI.KernargSegmentPtr, ArgInfo.KernargSegmentPtr);
  Any |= convertArg(AI.DispatchID, ArgInfo.DispatchID);
  Any |= convertArg(AI.FlatScratchInit, ArgInfo.FlatScratchInit);
  Any |= convertArg(AI.PrivateSegmentSize, ArgInfo.PrivateSegmentSize);
  Any |= convertArg(AI.WorkGroupIDX, ArgInfo.WorkGroupIDX);
  Any |= convertArg(AI.WorkGroupIDY, ArgInfo.WorkGroupIDY);
  Any |= convertArg(AI.WorkGroupIDZ, ArgInfo.WorkGroupIDZ);
  Any |= convertArg(AI.WorkGroupInfo, ArgInfo.WorkGroupInfo);
  Any |= convertArg(AI.PrivateSegmentWaveByteOffset,
                    ArgInfo.PrivateSegmentWaveByteOffset);
  Any |= convertArg(AI.ImplicitArgPtr, ArgInfo.ImplicitArgPtr);
  Any |= convertArg(AI.ImplicitBufferPtr, ArgInfo.ImplicitBufferPtr);
  Any |= convertArg(AI.WorkItemIDX, ArgInfo.WorkItemIDX);
  Any |= convertArg(AI.WorkItemIDY, ArgInfo.WorkItemIDY);
  Any |= convertArg(AI.WorkItemIDZ, ArgInfo.WorkItemIDZ);

  if (Any)
    return AI;

Andric return None; 5350b57cec5SDimitry Andric } 5360b57cec5SDimitry Andric 5370b57cec5SDimitry Andric yaml::SIMachineFunctionInfo::SIMachineFunctionInfo( 538fe6060f1SDimitry Andric const llvm::SIMachineFunctionInfo &MFI, const TargetRegisterInfo &TRI, 539fe6060f1SDimitry Andric const llvm::MachineFunction &MF) 5400b57cec5SDimitry Andric : ExplicitKernArgSize(MFI.getExplicitKernArgSize()), 541e8d8bef9SDimitry Andric MaxKernArgAlign(MFI.getMaxKernArgAlign()), LDSSize(MFI.getLDSSize()), 542e8d8bef9SDimitry Andric DynLDSAlign(MFI.getDynLDSAlign()), IsEntryFunction(MFI.isEntryFunction()), 5430b57cec5SDimitry Andric NoSignedZerosFPMath(MFI.hasNoSignedZerosFPMath()), 544e8d8bef9SDimitry Andric MemoryBound(MFI.isMemoryBound()), WaveLimiter(MFI.needsWaveLimiter()), 545e8d8bef9SDimitry Andric HasSpilledSGPRs(MFI.hasSpilledSGPRs()), 546e8d8bef9SDimitry Andric HasSpilledVGPRs(MFI.hasSpilledVGPRs()), 5478bcb0991SDimitry Andric HighBitsOf32BitAddress(MFI.get32BitAddressHighBits()), 548e8d8bef9SDimitry Andric Occupancy(MFI.getOccupancy()), 5490b57cec5SDimitry Andric ScratchRSrcReg(regToString(MFI.getScratchRSrcReg(), TRI)), 5500b57cec5SDimitry Andric FrameOffsetReg(regToString(MFI.getFrameOffsetReg(), TRI)), 5510b57cec5SDimitry Andric StackPtrOffsetReg(regToString(MFI.getStackPtrOffsetReg(), TRI)), 552e8d8bef9SDimitry Andric ArgInfo(convertArgumentInfo(MFI.getArgInfo(), TRI)), Mode(MFI.getMode()) { 553fe6060f1SDimitry Andric auto SFI = MFI.getOptionalScavengeFI(); 554fe6060f1SDimitry Andric if (SFI) 555fe6060f1SDimitry Andric ScavengeFI = yaml::FrameIndex(*SFI, MF.getFrameInfo()); 556e8d8bef9SDimitry Andric } 5570b57cec5SDimitry Andric 5580b57cec5SDimitry Andric void yaml::SIMachineFunctionInfo::mappingImpl(yaml::IO &YamlIO) { 5590b57cec5SDimitry Andric MappingTraits<SIMachineFunctionInfo>::mapping(YamlIO, *this); 5600b57cec5SDimitry Andric } 5610b57cec5SDimitry Andric 5620b57cec5SDimitry Andric bool SIMachineFunctionInfo::initializeBaseYamlFields( 563fe6060f1SDimitry Andric 
    const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF,
    PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) {
  // Copy the scalar fields parsed from MIR YAML back into this function
  // info. Returns true on error (with Error/SourceRange populated for the
  // MIR parser), false on success.
  ExplicitKernArgSize = YamlMFI.ExplicitKernArgSize;
  MaxKernArgAlign = assumeAligned(YamlMFI.MaxKernArgAlign);
  LDSSize = YamlMFI.LDSSize;
  DynLDSAlign = YamlMFI.DynLDSAlign;
  HighBitsOf32BitAddress = YamlMFI.HighBitsOf32BitAddress;
  Occupancy = YamlMFI.Occupancy;
  IsEntryFunction = YamlMFI.IsEntryFunction;
  NoSignedZerosFPMath = YamlMFI.NoSignedZerosFPMath;
  MemoryBound = YamlMFI.MemoryBound;
  WaveLimiter = YamlMFI.WaveLimiter;
  HasSpilledSGPRs = YamlMFI.HasSpilledSGPRs;
  HasSpilledVGPRs = YamlMFI.HasSpilledVGPRs;

  if (YamlMFI.ScavengeFI) {
    auto FIOrErr = YamlMFI.ScavengeFI->getFI(MF.getFrameInfo());
    if (!FIOrErr) {
      // Create a diagnostic for the frame index.
      const MemoryBuffer &Buffer =
          *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());

      Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1, 1,
                           SourceMgr::DK_Error, toString(FIOrErr.takeError()),
                           "", None, None);
      SourceRange = YamlMFI.ScavengeFI->SourceRange;
      return true; // Signal a parse failure to the caller.
    }
    ScavengeFI = *FIOrErr;
  } else {
    ScavengeFI = None;
  }
  return false;
}

/// Determine whether this function uses (or must conservatively be assumed
/// to use) AGPRs. The answer is cached in the UsesAGPRs member — assigned
/// from this const method, so the member must be mutable — except in the
/// one deferred case noted below.
bool SIMachineFunctionInfo::usesAGPRs(const MachineFunction &MF) const {
  // Fast path: a previous query already settled the answer.
  if (UsesAGPRs)
    return *UsesAGPRs;

  // Conservatively report AGPR use for non-entry calling conventions and
  // for functions that make calls.
  if (!AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv()) ||
      MF.getFrameInfo().hasCalls()) {
    UsesAGPRs = true;
    return true;
  }

  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Scan all virtual registers for one constrained to an AGPR class.
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    const Register Reg = Register::index2VirtReg(I);
    const TargetRegisterClass *RC = MRI.getRegClassOrNull(Reg);
    if (RC && SIRegisterInfo::isAGPRClass(RC)) {
      UsesAGPRs = true;
      return true;
    } else if (!RC && !MRI.use_empty(Reg) && MRI.getType(Reg).isValid()) {
      // A used register with an LLT but no register class: defer caching
      // UsesAGPRs, as the function might not have been register-bank
      // selected yet.
      return true;
    }
  }

  // Also check for direct uses of physical AGPRs.
  for (MCRegister Reg : AMDGPU::AGPR_32RegClass) {
    if (MRI.isPhysRegUsed(Reg)) {
      UsesAGPRs = true;
      return true;
    }
  }

  UsesAGPRs = false;
  return false;
}