//===-- NVPTXLowerArgs.cpp - Lower arguments ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
// Arguments to kernel and device functions are passed via param space,
// which imposes certain restrictions:
// http://docs.nvidia.com/cuda/parallel-thread-execution/#state-spaces
//
// Kernel parameters are read-only and accessible only via the ld.param
// instruction, directly or via a pointer. Pointers to kernel
// arguments can't be converted to generic address space.
//
// Device function parameters are directly accessible via
// ld.param/st.param, but taking the address of one returns a pointer
// to a copy created in local space which *can't* be used with
// ld.param/st.param.
//
// Copying a byval struct into local memory in IR allows us to enforce
// the param space restrictions, gives the rest of IR a pointer w/o
// param space restrictions, and gives us an opportunity to eliminate
// the copy.
//
// Pointer arguments to kernel functions need more work to be lowered:
//
// 1. Convert non-byval pointer arguments of CUDA kernels to pointers in the
//    global address space. This allows later optimizations to emit
//    ld.global.*/st.global.* for accessing these pointer arguments. For
//    example,
//
//    define void @foo(float* %input) {
//      %v = load float, float* %input, align 4
//      ...
//    }
//
//    becomes
//
//    define void @foo(float* %input) {
//      %input2 = addrspacecast float* %input to float addrspace(1)*
//      %input3 = addrspacecast float addrspace(1)* %input2 to float*
//      %v = load float, float* %input3, align 4
//      ...
//    }
//
//    Later, NVPTXInferAddressSpaces will optimize it to
//
//    define void @foo(float* %input) {
//      %input2 = addrspacecast float* %input to float addrspace(1)*
//      %v = load float, float addrspace(1)* %input2, align 4
//      ...
//    }
//
// 2. Convert pointers in a byval kernel parameter to pointers in the global
//    address space. Like #1, this allows NVPTX to emit more ld/st.global.
//    E.g.,
//
//    struct S {
//      int *x;
//      int *y;
//    };
//    __global__ void foo(S s) {
//      int *b = s.y;
//      // use b
//    }
//
//    "b" points to the global address space. At the IR level,
//
//    define void @foo({i32*, i32*}* byval %input) {
//      %b_ptr = getelementptr {i32*, i32*}, {i32*, i32*}* %input, i64 0, i32 1
//      %b = load i32*, i32** %b_ptr
//      ; use %b
//    }
//
//    becomes
//
//    define void @foo({i32*, i32*}* byval %input) {
//      %b_ptr = getelementptr {i32*, i32*}, {i32*, i32*}* %input, i64 0, i32 1
//      %b = load i32*, i32** %b_ptr
//      %b_global = addrspacecast i32* %b to i32 addrspace(1)*
//      %b_generic = addrspacecast i32 addrspace(1)* %b_global to i32*
//      ; use %b_generic
//    }
//
// TODO: merge this pass with NVPTXInferAddressSpaces so that other passes
// don't cancel the addrspacecast pair this pass emits.
//===----------------------------------------------------------------------===//

#include "NVPTX.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXUtilities.h"
#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"

using namespace llvm;

namespace llvm {
void initializeNVPTXLowerArgsPass(PassRegistry &);
}

namespace {
class NVPTXLowerArgs : public FunctionPass {
  bool runOnFunction(Function &F) override;

  bool runOnKernelFunction(Function &F);
  bool runOnDeviceFunction(Function &F);

  // handle byval parameters
  void handleByValParam(Argument *Arg);
  // Knowing Ptr must point to the global address space, this function
  // addrspacecasts Ptr to global and then back to generic. This allows
  // NVPTXInferAddressSpaces to fold the global-to-generic cast into
  // loads/stores that appear later.
  void markPointerAsGlobal(Value *Ptr);

public:
  static char ID; // Pass identification, replacement for typeid
  NVPTXLowerArgs(const NVPTXTargetMachine *TM = nullptr)
      : FunctionPass(ID), TM(TM) {}
  StringRef getPassName() const override {
    return "Lower pointer arguments of CUDA kernels";
  }

private:
  const NVPTXTargetMachine *TM;
};
} // namespace

char NVPTXLowerArgs::ID = 1;

INITIALIZE_PASS(NVPTXLowerArgs, "nvptx-lower-args",
                "Lower arguments (NVPTX)", false, false)
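
// The pass is registered under the command-line name "nvptx-lower-args", so
// it can be exercised in isolation on IR tests, e.g. (a sketch; the exact
// invocation depends on the build and target triple):
//
//   opt -S -mtriple=nvptx64-nvidia-cuda -nvptx-lower-args input.ll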

// =============================================================================
// If the function had a byval struct ptr arg, say foo(%struct.x* byval %d),
// then add the following instructions to the first basic block:
//
// %temp = alloca %struct.x, align 8
// %tempd = addrspacecast %struct.x* %d to %struct.x addrspace(101)*
// %tv = load %struct.x addrspace(101)* %tempd
// store %struct.x %tv, %struct.x* %temp, align 8
//
// The above code allocates some space on the stack and copies the incoming
// struct from param space to local space.
// Then replace all occurrences of %d by %temp.
// =============================================================================
void NVPTXLowerArgs::handleByValParam(Argument *Arg) {
  Function *Func = Arg->getParent();
  Instruction *FirstInst = &(Func->getEntryBlock().front());
  PointerType *PType = dyn_cast<PointerType>(Arg->getType());

  assert(PType && "Expecting pointer type in handleByValParam");

  Type *StructType = PType->getElementType();
  unsigned AS = Func->getParent()->getDataLayout().getAllocaAddrSpace();
  AllocaInst *AllocA =
      new AllocaInst(StructType, AS, Arg->getName(), FirstInst);
  // Set the alignment to the alignment of the byval parameter. This is
  // because later loads/stores assume that alignment, and we are going to
  // replace the use of the byval parameter with this alloca instruction.
  AllocA->setAlignment(Func->getParamAlignment(Arg->getArgNo()));
  Arg->replaceAllUsesWith(AllocA);

  // Access the byval argument through the param address space so that the
  // load below is selected to ld.param rather than a generic load.
  Value *ArgInParam = new AddrSpaceCastInst(
      Arg, PointerType::get(StructType, ADDRESS_SPACE_PARAM), Arg->getName(),
      FirstInst);
  LoadInst *LI =
      new LoadInst(StructType, ArgInParam, Arg->getName(), FirstInst);
  new StoreInst(LI, AllocA, FirstInst);
}
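
// At the PTX level, the param-space load created above is eventually selected
// to an ld.param access of the kernel parameter, and the store fills the
// local copy, roughly (a sketch; register and symbol names are illustrative):
//
//   ld.param.u32 %r1, [foo_param_0];
//   st.local.u32 [%SPL], %r1;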

void NVPTXLowerArgs::markPointerAsGlobal(Value *Ptr) {
  if (Ptr->getType()->getPointerAddressSpace() == ADDRESS_SPACE_GLOBAL)
    return;

  // Decide where to emit the addrspacecast pair.
  BasicBlock::iterator InsertPt;
  if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
    // Insert at the function entry if Ptr is an argument.
    InsertPt = Arg->getParent()->getEntryBlock().begin();
  } else {
    // Insert right after Ptr if Ptr is an instruction.
    InsertPt = ++cast<Instruction>(Ptr)->getIterator();
    assert(InsertPt != InsertPt->getParent()->end() &&
           "We don't call this function with Ptr being a terminator.");
  }

  Instruction *PtrInGlobal = new AddrSpaceCastInst(
      Ptr, PointerType::get(Ptr->getType()->getPointerElementType(),
                            ADDRESS_SPACE_GLOBAL),
      Ptr->getName(), &*InsertPt);
  Value *PtrInGeneric = new AddrSpaceCastInst(PtrInGlobal, Ptr->getType(),
                                              Ptr->getName(), &*InsertPt);
  // Replace with PtrInGeneric all uses of Ptr except PtrInGlobal.
  Ptr->replaceAllUsesWith(PtrInGeneric);
  PtrInGlobal->setOperand(0, Ptr);
}
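
// For example, applied to a generic pointer %b loaded from a byval kernel
// parameter, markPointerAsGlobal emits (a sketch; value names are
// illustrative):
//
//   %b_global = addrspacecast i32* %b to i32 addrspace(1)*
//   %b_generic = addrspacecast i32 addrspace(1)* %b_global to i32*
//
// and rewrites every use of %b other than %b_global to use %b_generic.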

// =============================================================================
// Main function for this pass.
// =============================================================================
bool NVPTXLowerArgs::runOnKernelFunction(Function &F) {
  if (TM && TM->getDrvInterface() == NVPTX::CUDA) {
    // Mark pointers in byval structs as global.
    for (auto &B : F) {
      for (auto &I : B) {
        if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
          if (LI->getType()->isPointerTy()) {
            Value *UO = GetUnderlyingObject(LI->getPointerOperand(),
                                            F.getParent()->getDataLayout());
            if (Argument *Arg = dyn_cast<Argument>(UO)) {
              if (Arg->hasByValAttr()) {
                // LI is a load from a pointer within a byval kernel parameter.
                markPointerAsGlobal(LI);
              }
            }
          }
        }
      }
    }
  }

  for (Argument &Arg : F.args()) {
    if (Arg.getType()->isPointerTy()) {
      if (Arg.hasByValAttr())
        handleByValParam(&Arg);
      else if (TM && TM->getDrvInterface() == NVPTX::CUDA)
        markPointerAsGlobal(&Arg);
    }
  }
  return true;
}

// Device functions only need to copy byval args into local memory.
bool NVPTXLowerArgs::runOnDeviceFunction(Function &F) {
  for (Argument &Arg : F.args())
    if (Arg.getType()->isPointerTy() && Arg.hasByValAttr())
      handleByValParam(&Arg);
  return true;
}

bool NVPTXLowerArgs::runOnFunction(Function &F) {
  return isKernelFunction(F) ? runOnKernelFunction(F) : runOnDeviceFunction(F);
}

FunctionPass *
llvm::createNVPTXLowerArgsPass(const NVPTXTargetMachine *TM) {
  return new NVPTXLowerArgs(TM);
}
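
// A minimal usage sketch (hypothetical driver code, not part of this file),
// assuming an NVPTXTargetMachine NVPTXTM and a Module M:
//
//   legacy::PassManager PM;
//   PM.add(createNVPTXLowerArgsPass(&NVPTXTM));
//   PM.run(M);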