//===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates LDS uses from non-kernel functions.
//
// The strategy is to create a new struct with a field for each LDS variable
// and allocate that struct at the same address for every kernel. Uses of the
// original LDS variables are then replaced with compile time offsets from that
// known address. AMDGPUMachineFunction allocates the LDS global.
//
// Local variables with constant annotation or non-undef initializer are passed
// through unchanged for simplification or error diagnostics in later passes.
//
// To reduce the memory overhead, variables that are only used by kernels are
// excluded from this transform. The analysis to determine whether a variable
// is only used by a kernel is cheap and conservative, so this may allocate
// a variable in every kernel when it was not strictly necessary to do so.
//
// A possible future refinement is to specialise the structure per-kernel, so
// that fields can be elided based on more expensive analysis.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDGPULDSUtils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <vector>

#define DEBUG_TYPE "amdgpu-lower-module-lds"

using namespace llvm;

static cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

namespace {

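// Collect every GlobalValue named by llvm.used or llvm.compiler.used so the
// pass can tell which of the rewritten LDS variables appear on those lists.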
SmallPtrSet<GlobalValue *, 32> getUsedList(Module &M) {
  SmallPtrSet<GlobalValue *, 32> UsedList;

  SmallVector<GlobalValue *, 32> TmpVec;
  collectUsedGlobalVariables(M, TmpVec, true);
  UsedList.insert(TmpVec.begin(), TmpVec.end());

  TmpVec.clear();
  collectUsedGlobalVariables(M, TmpVec, false);
  UsedList.insert(TmpVec.begin(), TmpVec.end());

  return UsedList;
}

class AMDGPULowerModuleLDS : public ModulePass {

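  // Rebuild the named appending list (llvm.used or llvm.compiler.used) without
  // the entries in ToRemove. The existing array is erased and, if any entries
  // survive, recreated from the remaining constants.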
  static void removeFromUsedList(Module &M, StringRef Name,
                                 SmallPtrSetImpl<Constant *> &ToRemove) {
    GlobalVariable *GV = M.getNamedGlobal(Name);
    if (!GV || ToRemove.empty()) {
      return;
    }

    SmallVector<Constant *, 16> Init;
    auto *CA = cast<ConstantArray>(GV->getInitializer());
    for (auto &Op : CA->operands()) {
      // ModuleUtils::appendToUsed only inserts Constants
      Constant *C = cast<Constant>(Op);
      if (!ToRemove.contains(C->stripPointerCasts())) {
        Init.push_back(C);
      }
    }

    if (Init.size() == CA->getNumOperands()) {
      return; // none to remove
    }

    GV->eraseFromParent();

    for (Constant *C : ToRemove) {
      C->removeDeadConstantUsers();
    }

    if (!Init.empty()) {
      ArrayType *ATy =
          ArrayType::get(Type::getInt8PtrTy(M.getContext()), Init.size());
      GV =
          new llvm::GlobalVariable(M, ATy, false, GlobalValue::AppendingLinkage,
                                   ConstantArray::get(ATy, Init), Name);
      GV->setSection("llvm.metadata");
    }
  }

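  // Remove the given LDS variables from both llvm.used and llvm.compiler.used.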
  static void
  removeFromUsedLists(Module &M,
                      const std::vector<GlobalVariable *> &LocalVars) {
    SmallPtrSet<Constant *, 32> LocalVarsSet;
    for (GlobalVariable *LocalVar : LocalVars)
      if (Constant *C = dyn_cast<Constant>(LocalVar->stripPointerCasts()))
        LocalVarsSet.insert(C);
    removeFromUsedList(M, "llvm.used", LocalVarsSet);
    removeFromUsedList(M, "llvm.compiler.used", LocalVarsSet);
  }

  static void markUsedByKernel(IRBuilder<> &Builder, Function *Func,
                               GlobalVariable *SGV) {
    // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
    // that might call a function which accesses a field within it. This is
    // presently approximated to 'all kernels' if there are any such functions
    // in the module. This implicit use is redefined as an explicit use here so
    // that later passes, specifically PromoteAlloca, account for the required
    // memory without any knowledge of this transform.

    // An operand bundle on llvm.donothing works because the call instruction
    // survives until after the last pass that needs to account for LDS. It is
    // better than inline asm as the latter survives until the end of codegen. A
    // totally robust solution would be a function with the same semantics as
    // llvm.donothing that takes a pointer to the instance and is lowered to a
    // no-op after LDS is allocated, but that is not presently necessary.

    LLVMContext &Ctx = Func->getContext();

    Builder.SetInsertPoint(Func->getEntryBlock().getFirstNonPHI());

    FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), {});

    Function *Decl =
        Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});

    Value *UseInstance[1] = {Builder.CreateInBoundsGEP(
        SGV->getValueType(), SGV, ConstantInt::get(Type::getInt32Ty(Ctx), 0))};

    Builder.CreateCall(FTy, Decl, {},
                       {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)},
                       "");
  }

private:
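  // GlobalValues named by llvm.used or llvm.compiler.used, cached for the
  // duration of the pass so entries can be dropped when variables are erased.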
  SmallPtrSet<GlobalValue *, 32> UsedList;

public:
  static char ID;

  AMDGPULowerModuleLDS() : ModulePass(ID) {
    initializeAMDGPULowerModuleLDSPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    UsedList = getUsedList(M);
    bool Changed = superAlignLDSGlobals(M);
    Changed |= processUsedLDS(M);

    for (Function &F : M.functions()) {
      if (F.isDeclaration())
        continue;

      // Only lower compute kernels' LDS.
      if (!AMDGPU::isKernel(F.getCallingConv()))
        continue;
      Changed |= processUsedLDS(M, &F);
    }

    UsedList.clear();
    return Changed;
  }

private:
  // Increase the alignment of LDS globals if necessary to maximise the chance
  // that we can use aligned LDS instructions to access them.
  static bool superAlignLDSGlobals(Module &M) {
    const DataLayout &DL = M.getDataLayout();
    bool Changed = false;
    if (!SuperAlignLDSGlobals) {
      return Changed;
    }

    for (auto &GV : M.globals()) {
      if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
        // Only changing alignment of LDS variables
        continue;
      }
      if (!GV.hasInitializer()) {
        // cuda/hip extern __shared__ variable, leave alignment alone
        continue;
      }

      Align Alignment = AMDGPU::getAlign(DL, &GV);
      TypeSize GVSize = DL.getTypeAllocSize(GV.getValueType());

      if (GVSize > 8) {
        // We might want to use a b96 or b128 load/store
        Alignment = std::max(Alignment, Align(16));
      } else if (GVSize > 4) {
        // We might want to use a b64 load/store
        Alignment = std::max(Alignment, Align(8));
      } else if (GVSize > 2) {
        // We might want to use a b32 load/store
        Alignment = std::max(Alignment, Align(4));
      } else if (GVSize > 1) {
        // We might want to use a b16 load/store
        Alignment = std::max(Alignment, Align(2));
      }

      if (Alignment != AMDGPU::getAlign(DL, &GV)) {
        Changed = true;
        GV.setAlignment(Alignment);
      }
    }
    return Changed;
  }

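  // Pack the LDS variables chosen by findVariablesToLower into one struct and
  // rewrite their uses as constant offsets into it. With F == nullptr this
  // builds the module-scope llvm.amdgcn.module.lds instance; otherwise it
  // builds a per-kernel llvm.amdgcn.kernel.<name>.lds instance for uses inside
  // kernel F. Returns true if anything was rewritten.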
  bool processUsedLDS(Module &M, Function *F = nullptr) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // Find variables to move into new struct instance
    std::vector<GlobalVariable *> FoundLocalVars =
        AMDGPU::findVariablesToLower(M, F);

    if (FoundLocalVars.empty()) {
      // No variables to rewrite, no changes made.
      return false;
    }

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(FoundLocalVars.size());
    for (GlobalVariable *GV : FoundLocalVars) {
      OptimizedStructLayoutField F(GV, DL.getTypeAllocSize(GV->getValueType()),
                                   AMDGPU::getAlign(DL, GV));
      LayoutFields.emplace_back(F);
    }

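    // Lay the fields out to minimise padding before packing them into a struct.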
    performOptimizedStructLayout(LayoutFields);

    std::vector<GlobalVariable *> LocalVars;
    LocalVars.reserve(FoundLocalVars.size()); // will be at least this large
    {
      // This usually won't need to insert any padding, perhaps avoid the alloc
      uint64_t CurrentOffset = 0;
      for (size_t I = 0; I < LayoutFields.size(); I++) {
        GlobalVariable *FGV = static_cast<GlobalVariable *>(
            const_cast<void *>(LayoutFields[I].Id));
        Align DataAlign = LayoutFields[I].Alignment;

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet alignment requested
          // Note (o +      (a - (o % a)) ) % a == 0
          //      (offset + Padding       ) % align == 0

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage, UndefValue::get(ATy),
              "", nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
              false));
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        CurrentOffset += LayoutFields[I].Size;
      }
    }

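    // Build the struct type from the ordered (and possibly padded) variable
    // list, then create the single LDS instance that replaces them.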
    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    std::string VarName(
        F ? (Twine("llvm.amdgcn.kernel.") + F->getName() + ".lds").str()
          : "llvm.amdgcn.module.lds");
    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    Align StructAlign = AMDGPU::getAlign(DL, LocalVars[0]);

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, UndefValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(StructAlign);
    if (!F) {
      appendToCompilerUsed(
          M, {static_cast<GlobalValue *>(
                 ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                     cast<Constant>(SGV), Type::getInt8PtrTy(Ctx)))});
    }

    // The verifier rejects used lists containing an inttoptr of a constant,
    // so remove the variables from these lists before replaceAllUsesWith.
    removeFromUsedLists(M, LocalVars);

    // Create alias.scope metadata and the corresponding lists. Each field in
    // the new structure does not alias any of the other fields.
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    if (LocalVars.size() > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(LocalVars.size());
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < LocalVars.size(); I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }

    // Replace uses of ith variable with a constantexpr to the ith field of the
    // instance that will be allocated by AMDGPUMachineFunction
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx);
      if (F) {
        // Replace all constant uses with instructions if they belong to the
        // current kernel.
        for (User *U : make_early_inc_range(GV->users())) {
          if (ConstantExpr *C = dyn_cast<ConstantExpr>(U))
            AMDGPU::replaceConstantUsesInFunction(C, F);
        }

        GV->removeDeadConstantUsers();

        GV->replaceUsesWithIf(GEP, [F](Use &U) {
          Instruction *I = dyn_cast<Instruction>(U.getUser());
          return I && I->getFunction() == F;
        });
      } else {
        GV->replaceAllUsesWith(GEP);
      }
      if (GV->use_empty()) {
        UsedList.erase(GV);
        GV->eraseFromParent();
      }

      uint64_t Off = DL.getStructLayout(LDSTy)->getElementOffset(I);
      Align A = commonAlignment(StructAlign, Off);

      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
    }

    // This ensures the variable is allocated when called functions access it.
    // It also lets other passes, specifically PromoteAlloca, accurately
    // calculate how much LDS will be used by the kernel after lowering.
    if (!F) {
      IRBuilder<> Builder(Ctx);
      for (Function &Func : M.functions()) {
        if (!Func.isDeclaration() && AMDGPU::isKernelCC(&Func)) {
          markUsedByKernel(Builder, &Func, SGV);
        }
      }
    }
    return true;
  }

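  // Walk the users of Ptr (a constantexpr GEP into the struct) and propagate
  // the known alignment to loads, stores and atomics whose pointer operand is
  // Ptr, attaching the alias.scope/noalias metadata to memory instructions and
  // recursing through GEPs and casts up to MaxDepth levels.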
  void refineUsesAlignmentAndAA(Value *Ptr, Align A, const DataLayout &DL,
                                MDNode *AliasScope, MDNode *NoAlias,
                                unsigned MaxDepth = 5) {
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

    for (User *U : Ptr->users()) {
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // None of the atomicrmw operations can work on pointers, but check
        // anyway in case that changes or we are processing a ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          Align GA;
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getLimitedValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1);
      }
    }
  }
};

} // namespace
char AMDGPULowerModuleLDS::ID = 0;

char &llvm::AMDGPULowerModuleLDSID = AMDGPULowerModuleLDS::ID;

INITIALIZE_PASS(AMDGPULowerModuleLDS, DEBUG_TYPE,
                "Lower uses of LDS variables from non-kernel functions", false,
                false)

ModulePass *llvm::createAMDGPULowerModuleLDSPass() {
  return new AMDGPULowerModuleLDS();
}

PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
                                                ModuleAnalysisManager &) {
  return AMDGPULowerModuleLDS().runOnModule(M) ? PreservedAnalyses::none()
                                               : PreservedAnalyses::all();
}