xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp (revision e8d8bef961a50d4dc22501cde4fb9fb0be1b2532)
10b57cec5SDimitry Andric //===-- AMDGPULowerKernelArguments.cpp ------------------------------------------===//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric //
90b57cec5SDimitry Andric /// \file This pass replaces accesses to kernel arguments with loads from
100b57cec5SDimitry Andric /// offsets from the kernarg base pointer.
110b57cec5SDimitry Andric //
120b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
130b57cec5SDimitry Andric 
140b57cec5SDimitry Andric #include "AMDGPU.h"
15*e8d8bef9SDimitry Andric #include "GCNSubtarget.h"
160b57cec5SDimitry Andric #include "llvm/CodeGen/TargetPassConfig.h"
17*e8d8bef9SDimitry Andric #include "llvm/IR/IntrinsicsAMDGPU.h"
180b57cec5SDimitry Andric #include "llvm/IR/MDBuilder.h"
19*e8d8bef9SDimitry Andric #include "llvm/Target/TargetMachine.h"
200b57cec5SDimitry Andric #define DEBUG_TYPE "amdgpu-lower-kernel-arguments"
210b57cec5SDimitry Andric 
220b57cec5SDimitry Andric using namespace llvm;
230b57cec5SDimitry Andric 
240b57cec5SDimitry Andric namespace {
250b57cec5SDimitry Andric 
260b57cec5SDimitry Andric class AMDGPULowerKernelArguments : public FunctionPass{
270b57cec5SDimitry Andric public:
280b57cec5SDimitry Andric   static char ID;
290b57cec5SDimitry Andric 
300b57cec5SDimitry Andric   AMDGPULowerKernelArguments() : FunctionPass(ID) {}
310b57cec5SDimitry Andric 
320b57cec5SDimitry Andric   bool runOnFunction(Function &F) override;
330b57cec5SDimitry Andric 
340b57cec5SDimitry Andric   void getAnalysisUsage(AnalysisUsage &AU) const override {
350b57cec5SDimitry Andric     AU.addRequired<TargetPassConfig>();
360b57cec5SDimitry Andric     AU.setPreservesAll();
370b57cec5SDimitry Andric  }
380b57cec5SDimitry Andric };
390b57cec5SDimitry Andric 
400b57cec5SDimitry Andric } // end anonymous namespace
410b57cec5SDimitry Andric 
425ffd83dbSDimitry Andric // skip allocas
435ffd83dbSDimitry Andric static BasicBlock::iterator getInsertPt(BasicBlock &BB) {
445ffd83dbSDimitry Andric   BasicBlock::iterator InsPt = BB.getFirstInsertionPt();
455ffd83dbSDimitry Andric   for (BasicBlock::iterator E = BB.end(); InsPt != E; ++InsPt) {
465ffd83dbSDimitry Andric     AllocaInst *AI = dyn_cast<AllocaInst>(&*InsPt);
475ffd83dbSDimitry Andric 
485ffd83dbSDimitry Andric     // If this is a dynamic alloca, the value may depend on the loaded kernargs,
495ffd83dbSDimitry Andric     // so loads will need to be inserted before it.
505ffd83dbSDimitry Andric     if (!AI || !AI->isStaticAlloca())
515ffd83dbSDimitry Andric       break;
525ffd83dbSDimitry Andric   }
535ffd83dbSDimitry Andric 
545ffd83dbSDimitry Andric   return InsPt;
555ffd83dbSDimitry Andric }
565ffd83dbSDimitry Andric 
/// Replace every use of each kernel argument with a load from the implicit
/// kernarg segment pointer at the argument's ABI offset.
/// \returns true if the function was modified.
bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
  CallingConv::ID CC = F.getCallingConv();
  // Only AMDGPU kernels receive their arguments through the kernarg segment.
  if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
    return false;

  auto &TPC = getAnalysis<TargetPassConfig>();

  const TargetMachine &TM = TPC.getTM<TargetMachine>();
  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  LLVMContext &Ctx = F.getParent()->getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  BasicBlock &EntryBlock = *F.begin();
  // Insert after any leading static allocas; dynamic allocas may consume the
  // loaded argument values, so the loads must come first (see getInsertPt).
  IRBuilder<> Builder(&*getInsertPt(EntryBlock));

  const Align KernArgBaseAlign(16); // FIXME: Increase if necessary
  const uint64_t BaseOffset = ST.getExplicitKernelArgOffset(F);

  Align MaxAlign;
  // FIXME: Alignment is broken with explicit arg offset.
  const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F, MaxAlign);
  if (TotalKernArgSize == 0)
    return false;

  // Materialize the kernarg segment base pointer once for the whole function.
  CallInst *KernArgSegment =
      Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, {}, {},
                              nullptr, F.getName() + ".kernarg.segment");

  // The segment base is known non-null and dereferenceable for the full
  // argument area.
  KernArgSegment->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
  KernArgSegment->addAttribute(AttributeList::ReturnIndex,
    Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));

  unsigned AS = KernArgSegment->getType()->getPointerAddressSpace();
  // Running byte offset of the next explicit argument within the segment.
  uint64_t ExplicitArgOffset = 0;

  for (Argument &Arg : F.args()) {
    // byref arguments are passed as a pointer into the kernarg segment; the
    // pointee type and alignment describe the in-memory argument.
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    MaybeAlign ABITypeAlign = IsByRef ? Arg.getParamAlign() : None;
    if (!ABITypeAlign)
      ABITypeAlign = DL.getABITypeAlign(ArgTy);

    uint64_t Size = DL.getTypeSizeInBits(ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);

    // Offset of this argument; advance the running offset past it either way,
    // even if the argument is unused, to keep later offsets correct.
    uint64_t EltOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABITypeAlign) + AllocSize;

    if (Arg.use_empty())
      continue;

    // If this is byval, the loads are already explicit in the function. We just
    // need to rewrite the pointer values.
    if (IsByRef) {
      Value *ArgOffsetPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".byval.kernarg.offset");

      Value *CastOffsetPtr = Builder.CreatePointerBitCastOrAddrSpaceCast(
          ArgOffsetPtr, Arg.getType());
      Arg.replaceAllUsesWith(CastOffsetPtr);
      continue;
    }

    if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
      // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
      // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
      // can't represent this with range metadata because it's only allowed for
      // integer types.
      if ((PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
           PT->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) &&
          !ST.hasUsableDSOffset())
        continue;

      // FIXME: We can replace this with equivalent alias.scope/noalias
      // metadata, but this appears to be a lot of work.
      if (Arg.hasNoAliasAttr())
        continue;
    }

    auto *VT = dyn_cast<FixedVectorType>(ArgTy);
    bool IsV3 = VT && VT->getNumElements() == 3;
    // Sub-dword scalars are loaded as a full aligned i32 and the relevant
    // bits are extracted afterwards.
    bool DoShiftOpt = Size < 32 && !ArgTy->isAggregateType();

    VectorType *V4Ty = nullptr;

    int64_t AlignDownOffset = alignDown(EltOffset, 4);
    int64_t OffsetDiff = EltOffset - AlignDownOffset;
    Align AdjustedAlign = commonAlignment(
        KernArgBaseAlign, DoShiftOpt ? AlignDownOffset : EltOffset);

    Value *ArgPtr;
    Type *AdjustedArgTy;
    if (DoShiftOpt) { // FIXME: Handle aggregate types
      // Since we don't have sub-dword scalar loads, avoid doing an extload by
      // loading earlier than the argument address, and extracting the relevant
      // bits.
      //
      // Additionally widen any sub-dword load to i32 even if suitably aligned,
      // so that CSE between different argument loads works easily.
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, AlignDownOffset,
          Arg.getName() + ".kernarg.offset.align.down");
      AdjustedArgTy = Builder.getInt32Ty();
    } else {
      ArgPtr = Builder.CreateConstInBoundsGEP1_64(
          Builder.getInt8Ty(), KernArgSegment, EltOffset,
          Arg.getName() + ".kernarg.offset");
      AdjustedArgTy = ArgTy;
    }

    if (IsV3 && Size >= 32) {
      V4Ty = FixedVectorType::get(VT->getElementType(), 4);
      // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
      AdjustedArgTy = V4Ty;
    }

    ArgPtr = Builder.CreateBitCast(ArgPtr, AdjustedArgTy->getPointerTo(AS),
                                   ArgPtr->getName() + ".cast");
    LoadInst *Load =
        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
    // Kernel arguments never change during execution of the kernel.
    Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));

    MDBuilder MDB(Ctx);

    if (isa<PointerType>(ArgTy)) {
      // Transfer pointer attributes from the argument onto the load as
      // equivalent metadata, so later passes keep the same information.
      if (Arg.hasNonNullAttr())
        Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));

      uint64_t DerefBytes = Arg.getDereferenceableBytes();
      if (DerefBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable,
          MDNode::get(Ctx,
                      MDB.createConstant(
                        ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
      }

      uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
      if (DerefOrNullBytes != 0) {
        Load->setMetadata(
          LLVMContext::MD_dereferenceable_or_null,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          DerefOrNullBytes))));
      }

      unsigned ParamAlign = Arg.getParamAlignment();
      if (ParamAlign != 0) {
        Load->setMetadata(
          LLVMContext::MD_align,
          MDNode::get(Ctx,
                      MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
                                                          ParamAlign))));
      }
    }

    // TODO: Convert noalias arg to !noalias

    if (DoShiftOpt) {
      // Shift the wide i32 load down to the argument's byte offset, truncate
      // to the argument's bit width, and bitcast back to the original type.
      Value *ExtractBits = OffsetDiff == 0 ?
        Load : Builder.CreateLShr(Load, OffsetDiff * 8);

      IntegerType *ArgIntTy = Builder.getIntNTy(Size);
      Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
      Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
                                            Arg.getName() + ".load");
      Arg.replaceAllUsesWith(NewVal);
    } else if (IsV3) {
      // Shuffle the widened v4 load back down to the original v3 value.
      Value *Shuf = Builder.CreateShuffleVector(Load, ArrayRef<int>{0, 1, 2},
                                                Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Shuf);
    } else {
      Load->setName(Arg.getName() + ".load");
      Arg.replaceAllUsesWith(Load);
    }
  }

  // Record the final alignment of the segment base for downstream passes.
  KernArgSegment->addAttribute(
      AttributeList::ReturnIndex,
      Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));

  return true;
}
2400b57cec5SDimitry Andric 
// Register the pass with the legacy pass manager under DEBUG_TYPE
// ("amdgpu-lower-kernel-arguments").
INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
                      "AMDGPU Lower Kernel Arguments", false, false)
INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE, "AMDGPU Lower Kernel Arguments",
                    false, false)

// Unique pass identity; the address, not the value, identifies the pass.
char AMDGPULowerKernelArguments::ID = 0;

// Factory used by the AMDGPU target to add this pass to the pipeline.
FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
  return new AMDGPULowerKernelArguments();
}
251