xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp (revision aa1a8ff2d6dbc51ef058f46f3db5a8bb77967145)
//===-- AMDGPULowerKernelArguments.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This pass replaces accesses to kernel arguments with loads from
/// offsets from the kernarg base pointer.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-lower-kernel-arguments"

using namespace llvm;

namespace {

class PreloadKernelArgInfo {
private:
  Function &F;
  const GCNSubtarget &ST;
  unsigned NumFreeUserSGPRs;

public:
  SmallVector<llvm::Metadata *, 8> KernelArgMetadata;

  PreloadKernelArgInfo(Function &F, const GCNSubtarget &ST) : F(F), ST(ST) {
    setInitialFreeUserSGPRsCount();
  }

  // Set the initial number of user SGPRs that are free to preload kernel
  // arguments (the maximum available minus those already in use).
  void setInitialFreeUserSGPRsCount() {
    const unsigned MaxUserSGPRs = ST.getMaxNumUserSGPRs();
    GCNUserSGPRUsageInfo UserSGPRInfo(F, ST);

    NumFreeUserSGPRs = MaxUserSGPRs - UserSGPRInfo.getNumUsedUserSGPRs();
  }

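  // Each user SGPR covers 4 bytes of the kernarg segment, so both the
  // alignment padding since the end of the previous argument and the argument
  // itself are rounded up to 4-byte (one-SGPR) granularity below. For example
  // (illustrative values): an 8-byte argument at offset 40 whose predecessor
  // ended at offset 36 needs 1 padding SGPR plus 2 preload SGPRs.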
  bool tryAllocPreloadSGPRs(unsigned AllocSize, uint64_t ArgOffset,
                            uint64_t LastExplicitArgOffset) {
    // Check if this argument may be loaded into the same register as the
    // previous argument.
    if (!isAligned(Align(4), ArgOffset) && AllocSize < 4)
      return true;

    // Pad SGPRs for kernarg alignment.
    unsigned Padding = ArgOffset - LastExplicitArgOffset;
    unsigned PaddingSGPRs = alignTo(Padding, 4) / 4;
    unsigned NumPreloadSGPRs = alignTo(AllocSize, 4) / 4;
    if (NumPreloadSGPRs + PaddingSGPRs > NumFreeUserSGPRs)
      return false;

    NumFreeUserSGPRs -= (NumPreloadSGPRs + PaddingSGPRs);
    return true;
  }
};

class AMDGPULowerKernelArguments : public FunctionPass {
public:
  static char ID;

  AMDGPULowerKernelArguments() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
    AU.setPreservesAll();
  }
};

} // end anonymous namespace

// Skip over the static allocas at the start of the entry block so kernarg
// loads are inserted after them.
static BasicBlock::iterator getInsertPt(BasicBlock &BB) {
  BasicBlock::iterator InsPt = BB.getFirstInsertionPt();
  for (BasicBlock::iterator E = BB.end(); InsPt != E; ++InsPt) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&*InsPt);

    // If this is a dynamic alloca, the value may depend on the loaded
    // kernargs, so loads will need to be inserted before it.
    if (!AI || !AI->isStaticAlloca())
      break;
  }

  return InsPt;
}

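// Replace direct uses of each kernel argument with loads from the kernarg
// segment at the argument's offset. Arguments that qualify for SGPR preloading
// (marked inreg, on subtargets that support it) are left untouched here.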
static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
    return false;

  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  LLVMContext &Ctx = F.getParent()->getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  BasicBlock &EntryBlock = *F.begin();
  IRBuilder<> Builder(&EntryBlock, getInsertPt(EntryBlock));

  const Align KernArgBaseAlign(16); // FIXME: Increase if necessary
  const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();

  Align MaxAlign;
  // FIXME: Alignment is broken with explicit arg offset.
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, {},
                              nullptr, F.getName() + ".kernarg.segment");

  KernArgSegment->addRetAttr(Attribute::NonNull);
  KernArgSegment->addRetAttr(
      Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));
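  // The call produced here is roughly (illustrative; the value name is
  // derived from the kernel's name):
  //   %kernel.kernarg.segment = call nonnull dereferenceable(N)
  //       ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()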

  uint64_t ExplicitArgOffset = 0;
  // Preloaded kernel arguments must be sequential.
  bool InPreloadSequence = true;
  PreloadKernelArgInfo PreloadInfo(F, ST);

  for (Argument &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    MaybeAlign ParamAlign = IsByRef ? Arg.getParamAlign() : std::nullopt;
    Align ABITypeAlign = DL.getValueOrABITypeAlignment(ParamAlign, ArgTy);

    uint64_t Size = DL.getTypeSizeInBits(ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);

    uint64_t EltOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + BaseOffset;
    uint64_t LastExplicitArgOffset = ExplicitArgOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + AllocSize;
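    // For example (illustrative, assuming BaseOffset == 0): an i32 argument
    // followed by an i64 gives EltOffset 0 then 8 and ExplicitArgOffset 4 then
    // 16, since the i64 is placed at its 8-byte ABI alignment.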

    // Try to preload this argument into user SGPRs.
    if (Arg.hasInRegAttr() && InPreloadSequence && ST.hasKernargPreload() &&
        !ST.needsKernargPreloadBackwardsCompatibility() &&
        !Arg.getType()->isAggregateType())
      if (PreloadInfo.tryAllocPreloadSGPRs(AllocSize, EltOffset,
                                           LastExplicitArgOffset))
        continue;

    InPreloadSequence = false;

    if (Arg.use_empty())
      continue;

    // If this is a byref argument, the loads are already explicit in the
    // function. We just need to rewrite the pointer values.
    if (IsByRef) {
      Value *ArgOffsetPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".byval.kernarg.offset");

      Value *CastOffsetPtr =
          Builder.CreateAddrSpaceCast(ArgOffsetPtr, Arg.getType());
      Arg.replaceAllUsesWith(CastOffsetPtr);
      continue;
    }

    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
      // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
      // can't represent this with range metadata because it's only allowed for
      // integer types.
      if ((PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
           PT->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) &&
          !ST.hasUsableDSOffset())
        continue;

      // FIXME: We can replace this with equivalent alias.scope/noalias
      // metadata, but this appears to be a lot of work.
      if (Arg.hasNoAliasAttr())
        continue;
    }

    auto *VT = dyn_cast<FixedVectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();

    VectorType *V4Ty = nullptr;

    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    Align AdjustedAlign = commonAlignment(
        KernArgBaseAlign, DoShiftOpt ? AlignDownOffset : EltOffset);

    Value *ArgPtr;
    Type *AdjustedArgTy;
    if (DoShiftOpt) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      // TODO: Update this for GFX12 which does have scalar sub-dword loads.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, AlignDownOffset,
          Arg.getName() + ".kernarg.offset.align.down");
      AdjustedArgTy = Builder.getInt32Ty();
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".kernarg.offset");
      AdjustedArgTy = ArgTy;
    }

    if (IsV3 && Size >= 32) {
      V4Ty = FixedVectorType::get(VT->getElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
      AdjustedArgTy = V4Ty;
    }
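    // The widened <4 x T> load is shrunk back to the original three elements
    // below with a shufflevector over lanes 0..2.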

    LoadInst *Load =
        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

    MDBuilder MDB(Ctx);

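    // For pointer arguments, mirror the relevant parameter attributes onto the
    // load as metadata (nonnull, dereferenceable, dereferenceable_or_null,
    // align) so the information is not lost.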
    if (isa<PointerType>(ArgTy)) {
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable,
          MDNode::get(Ctx,
                      MDB.createConstant(
                        ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable_or_null,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          DerefOrNullBytes))));
      }

      if (MaybeAlign ParamAlign = Arg.getParamAlign()) {
        Load->setMetadata(
            LLVMContext::MD_align,
            MDNode::get(Ctx, MDB.createConstant(ConstantInt::get(
                                 Builder.getInt64Ty(), ParamAlign->value()))));
      }
    }

    // TODO: Convert noalias arg to !noalias

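    // Recover sub-dword values from the widened, 4-byte-aligned i32 load. For
    // example (illustrative): an i16 argument at kernarg offset 2 was loaded
    // as an i32 from offset 0, so it is shifted right by OffsetDiff * 8 == 16
    // bits, truncated to i16, and bitcast back to the argument type.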
    if (DoShiftOpt) {
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      Value *Shuf = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

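  // Record the strongest base alignment we can assume for the kernarg segment
  // pointer: at least 16 bytes, or the largest argument alignment if greater.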
  KernArgSegment->addRetAttr(
      Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}

bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
  auto &TPC = getAnalysis<TargetPassConfig>();
  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  return lowerKernelArguments(F, TM);
}

INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE,
                    "AMDGPU Lower Kernel Arguments", false, false)

char AMDGPULowerKernelArguments::ID = 0;

FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}

PreservedAnalyses
AMDGPULowerKernelArgumentsPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = lowerKernelArguments(F, TM);
  if (Changed) {
    // TODO: Preserves a lot more.
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }

  return PreservedAnalyses::all();
}
318