//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/NativeFormatting.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-argument-reg-usage-info"

INITIALIZE_PASS(AMDGPUArgumentUsageInfo, DEBUG_TYPE,
                "Argument Register Usage Information Storage", false, true)

void ArgDescriptor::print(raw_ostream &OS,
                          const TargetRegisterInfo *TRI) const {
  if (!isSet()) {
    OS << "<not set>\n";
    return;
  }

  if (isRegister())
    OS << "Reg " << printReg(getRegister(), TRI);
  else
    OS << "Stack offset " << getStackOffset();

  if (isMasked()) {
    OS << " & ";
    llvm::write_hex(OS, Mask, llvm::HexPrintStyle::PrefixLower);
  }

  OS << '\n';
}

char AMDGPUArgumentUsageInfo::ID = 0;

const AMDGPUFunctionArgInfo AMDGPUArgumentUsageInfo::ExternFunctionInfo{};

// Hardcoded registers from fixed function ABI
const AMDGPUFunctionArgInfo AMDGPUArgumentUsageInfo::FixedABIFunctionInfo =
    AMDGPUFunctionArgInfo::fixedABILayout();

bool AMDGPUArgumentUsageInfo::doInitialization(Module &M) {
  return false;
}

bool AMDGPUArgumentUsageInfo::doFinalization(Module &M) {
  ArgInfoMap.clear();
  return false;
}

// TODO: Print preload kernargs?
void AMDGPUArgumentUsageInfo::print(raw_ostream &OS, const Module *M) const {
  for (const auto &FI : ArgInfoMap) {
    OS << "Arguments for " << FI.first->getName() << '\n'
       << " PrivateSegmentBuffer: " << FI.second.PrivateSegmentBuffer
       << " DispatchPtr: " << FI.second.DispatchPtr
       << " QueuePtr: " << FI.second.QueuePtr
       << " KernargSegmentPtr: " << FI.second.KernargSegmentPtr
       << " DispatchID: " << FI.second.DispatchID
       << " FlatScratchInit: " << FI.second.FlatScratchInit
       << " PrivateSegmentSize: " << FI.second.PrivateSegmentSize
       << " WorkGroupIDX: " << FI.second.WorkGroupIDX
       << " WorkGroupIDY: " << FI.second.WorkGroupIDY
       << " WorkGroupIDZ: " << FI.second.WorkGroupIDZ
       << " WorkGroupInfo: " << FI.second.WorkGroupInfo
       << " LDSKernelId: " << FI.second.LDSKernelId
       << " PrivateSegmentWaveByteOffset: "
       << FI.second.PrivateSegmentWaveByteOffset
       << " ImplicitBufferPtr: " << FI.second.ImplicitBufferPtr
       << " ImplicitArgPtr: " << FI.second.ImplicitArgPtr
       << " WorkItemIDX " << FI.second.WorkItemIDX
       << " WorkItemIDY " << FI.second.WorkItemIDY
       << " WorkItemIDZ " << FI.second.WorkItemIDZ
       << '\n';
  }
}

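// Return the argument descriptor, register class, and low-level type used to
// pass the requested preloaded value, or a null descriptor if the value is
// not set for this function.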
std::tuple<const ArgDescriptor *, const TargetRegisterClass *, LLT>
AMDGPUFunctionArgInfo::getPreloadedValue(
    AMDGPUFunctionArgInfo::PreloadedValue Value) const {
  switch (Value) {
  case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_BUFFER: {
    return std::tuple(PrivateSegmentBuffer ? &PrivateSegmentBuffer : nullptr,
                      &AMDGPU::SGPR_128RegClass, LLT::fixed_vector(4, 32));
  }
  case AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR:
    return std::tuple(ImplicitBufferPtr ? &ImplicitBufferPtr : nullptr,
                      &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::WORKGROUP_ID_X:
    return std::tuple(WorkGroupIDX ? &WorkGroupIDX : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKGROUP_ID_Y:
    return std::tuple(WorkGroupIDY ? &WorkGroupIDY : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKGROUP_ID_Z:
    return std::tuple(WorkGroupIDZ ? &WorkGroupIDZ : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::LDS_KERNEL_ID:
    return std::tuple(LDSKernelId ? &LDSKernelId : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
    return std::tuple(
        PrivateSegmentWaveByteOffset ? &PrivateSegmentWaveByteOffset : nullptr,
        &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::PRIVATE_SEGMENT_SIZE:
    return std::tuple(PrivateSegmentSize ? &PrivateSegmentSize : nullptr,
                      &AMDGPU::SGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR:
    return std::tuple(KernargSegmentPtr ? &KernargSegmentPtr : nullptr,
                      &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR:
    return std::tuple(ImplicitArgPtr ? &ImplicitArgPtr : nullptr,
                      &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::DISPATCH_ID:
    return std::tuple(DispatchID ? &DispatchID : nullptr,
                      &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
  case AMDGPUFunctionArgInfo::FLAT_SCRATCH_INIT:
    return std::tuple(FlatScratchInit ? &FlatScratchInit : nullptr,
                      &AMDGPU::SGPR_64RegClass, LLT::scalar(64));
  case AMDGPUFunctionArgInfo::DISPATCH_PTR:
    return std::tuple(DispatchPtr ? &DispatchPtr : nullptr,
                      &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::QUEUE_PTR:
    return std::tuple(QueuePtr ? &QueuePtr : nullptr, &AMDGPU::SGPR_64RegClass,
                      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));
  case AMDGPUFunctionArgInfo::WORKITEM_ID_X:
    return std::tuple(WorkItemIDX ? &WorkItemIDX : nullptr,
                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKITEM_ID_Y:
    return std::tuple(WorkItemIDY ? &WorkItemIDY : nullptr,
                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
  case AMDGPUFunctionArgInfo::WORKITEM_ID_Z:
    return std::tuple(WorkItemIDZ ? &WorkItemIDZ : nullptr,
                      &AMDGPU::VGPR_32RegClass, LLT::scalar(32));
  }
  llvm_unreachable("unexpected preloaded value type");
}

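// Build the argument layout used by the fixed function ABI: the preloaded
// pointers and dispatch ID in SGPR pairs, the workgroup IDs and LDS kernel ID
// in single SGPRs, and the three workitem IDs packed into the low 30 bits of
// VGPR31 (10 bits each).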
AMDGPUFunctionArgInfo AMDGPUFunctionArgInfo::fixedABILayout() {
  AMDGPUFunctionArgInfo AI;
  AI.PrivateSegmentBuffer =
      ArgDescriptor::createRegister(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3);
  AI.DispatchPtr = ArgDescriptor::createRegister(AMDGPU::SGPR4_SGPR5);
  AI.QueuePtr = ArgDescriptor::createRegister(AMDGPU::SGPR6_SGPR7);

  // Do not pass the kernarg segment pointer; only pass the incremented
  // version (the implicit argument pointer) in its place.
  AI.ImplicitArgPtr = ArgDescriptor::createRegister(AMDGPU::SGPR8_SGPR9);
  AI.DispatchID = ArgDescriptor::createRegister(AMDGPU::SGPR10_SGPR11);

  // Skip FlatScratchInit/PrivateSegmentSize.
  AI.WorkGroupIDX = ArgDescriptor::createRegister(AMDGPU::SGPR12);
  AI.WorkGroupIDY = ArgDescriptor::createRegister(AMDGPU::SGPR13);
  AI.WorkGroupIDZ = ArgDescriptor::createRegister(AMDGPU::SGPR14);
  AI.LDSKernelId = ArgDescriptor::createRegister(AMDGPU::SGPR15);

  const unsigned Mask = 0x3ff;
  AI.WorkItemIDX = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask);
  AI.WorkItemIDY = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask << 10);
  AI.WorkItemIDZ = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask << 20);
  return AI;
}

const AMDGPUFunctionArgInfo &
AMDGPUArgumentUsageInfo::lookupFuncArgInfo(const Function &F) const {
  auto I = ArgInfoMap.find(&F);
  if (I == ArgInfoMap.end())
    return FixedABIFunctionInfo;
  return I->second;
}