//===- AMDGPUPerfHintAnalysis.cpp - analysis of functions memory traffic --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Analyzes if a function is potentially memory bound and if a kernel
/// may benefit from limiting the number of waves to reduce cache thrashing.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-perf-hint"

static cl::opt<unsigned>
    MemBoundThresh("amdgpu-membound-threshold", cl::init(50), cl::Hidden,
                   cl::desc("Function mem bound threshold in %"));

static cl::opt<unsigned>
    LimitWaveThresh("amdgpu-limit-wave-threshold", cl::init(50), cl::Hidden,
                    cl::desc("Kernel limit wave threshold in %"));

static cl::opt<unsigned>
    IAWeight("amdgpu-indirect-access-weight", cl::init(1000), cl::Hidden,
             cl::desc("Indirect access memory instruction weight"));

static cl::opt<unsigned>
    LSWeight("amdgpu-large-stride-weight", cl::init(1000), cl::Hidden,
             cl::desc("Large stride memory access weight"));

static cl::opt<unsigned>
    LargeStrideThresh("amdgpu-large-stride-threshold", cl::init(64),
                      cl::Hidden,
                      cl::desc("Large stride memory access threshold"));

STATISTIC(NumMemBound, "Number of functions marked as memory bound");
STATISTIC(NumLimitWave, "Number of functions marked as needing limit wave");

char llvm::AMDGPUPerfHintAnalysis::ID = 0;
char &llvm::AMDGPUPerfHintAnalysisID = AMDGPUPerfHintAnalysis::ID;

INITIALIZE_PASS(AMDGPUPerfHintAnalysis, DEBUG_TYPE,
                "Analysis if a function is memory bound", true, true)

namespace {

struct AMDGPUPerfHint {
  friend AMDGPUPerfHintAnalysis;

public:
  AMDGPUPerfHint(AMDGPUPerfHintAnalysis::FuncInfoMap &FIM_,
                 const TargetLowering *TLI_)
      : FIM(FIM_), DL(nullptr), TLI(TLI_) {}

  bool runOnFunction(Function &F);

private:
  struct MemAccessInfo {
    const Value *V;
    const Value *Base;
    int64_t Offset;
    MemAccessInfo() : V(nullptr), Base(nullptr), Offset(0) {}
    bool isLargeStride(MemAccessInfo &Reference) const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    Printable print() const {
      return Printable([this](raw_ostream &OS) {
        OS << "Value: " << *V << '\n'
           << "Base: " << *Base << " Offset: " << Offset << '\n';
      });
    }
#endif
  };

  MemAccessInfo makeMemAccessInfo(Instruction *) const;

  MemAccessInfo LastAccess; // Last memory access info

  AMDGPUPerfHintAnalysis::FuncInfoMap &FIM;

  const DataLayout *DL;

  const TargetLowering *TLI;

  AMDGPUPerfHintAnalysis::FuncInfo *visit(const Function &F);
  static bool isMemBound(const AMDGPUPerfHintAnalysis::FuncInfo &F);
  static bool needLimitWave(const AMDGPUPerfHintAnalysis::FuncInfo &F);

  bool isIndirectAccess(const Instruction *Inst) const;

  /// Check if the instruction is large stride.
  /// The purpose is to identify memory access pattern like:
  /// x = a[i];
  /// y = a[i+1000];
  /// z = a[i+2000];
  /// In the above example, the second and third memory access will be marked
  /// large stride memory access.
  bool isLargeStride(const Instruction *Inst);

  bool isGlobalAddr(const Value *V) const;
  bool isLocalAddr(const Value *V) const;
  bool isConstantAddr(const Value *V) const;
};

static std::pair<const Value *, const Type *>
getMemoryInstrPtrAndType(const Instruction *Inst) {
  if (auto LI = dyn_cast<LoadInst>(Inst))
    return {LI->getPointerOperand(), LI->getType()};
  if (auto SI = dyn_cast<StoreInst>(Inst))
    return {SI->getPointerOperand(), SI->getValueOperand()->getType()};
  if (auto AI = dyn_cast<AtomicCmpXchgInst>(Inst))
    return {AI->getPointerOperand(), AI->getCompareOperand()->getType()};
  if (auto AI = dyn_cast<AtomicRMWInst>(Inst))
    return {AI->getPointerOperand(), AI->getValOperand()->getType()};
  if (auto MI = dyn_cast<AnyMemIntrinsic>(Inst))
    return {MI->getRawDest(), Type::getInt8Ty(MI->getContext())};

  return {nullptr, nullptr};
}

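// Heuristic for indirect (pointer-chasing) accesses: starting from the
// address of a global/flat memory access, walk backwards through GEPs,
// casts, unary and binary operators, selects, and extractelements. If any
// contributing value is itself loaded from global, local, or constant
// memory, the original access is classified as indirect.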
bool AMDGPUPerfHint::isIndirectAccess(const Instruction *Inst) const {
  LLVM_DEBUG(dbgs() << "[isIndirectAccess] " << *Inst << '\n');
  SmallSet<const Value *, 32> WorkSet;
  SmallSet<const Value *, 32> Visited;
  if (const Value *MO = getMemoryInstrPtrAndType(Inst).first) {
    if (isGlobalAddr(MO))
      WorkSet.insert(MO);
  }

  while (!WorkSet.empty()) {
    const Value *V = *WorkSet.begin();
    WorkSet.erase(*WorkSet.begin());
    if (!Visited.insert(V).second)
      continue;
    LLVM_DEBUG(dbgs() << "  check: " << *V << '\n');

    if (auto LD = dyn_cast<LoadInst>(V)) {
      auto M = LD->getPointerOperand();
      if (isGlobalAddr(M) || isLocalAddr(M) || isConstantAddr(M)) {
        LLVM_DEBUG(dbgs() << "    is IA\n");
        return true;
      }
      continue;
    }

    if (auto GEP = dyn_cast<GetElementPtrInst>(V)) {
      auto P = GEP->getPointerOperand();
      WorkSet.insert(P);
      for (unsigned I = 1, E = GEP->getNumIndices() + 1; I != E; ++I)
        WorkSet.insert(GEP->getOperand(I));
      continue;
    }

    if (auto U = dyn_cast<UnaryInstruction>(V)) {
      WorkSet.insert(U->getOperand(0));
      continue;
    }

    if (auto BO = dyn_cast<BinaryOperator>(V)) {
      WorkSet.insert(BO->getOperand(0));
      WorkSet.insert(BO->getOperand(1));
      continue;
    }

    if (auto S = dyn_cast<SelectInst>(V)) {
      WorkSet.insert(S->getFalseValue());
      WorkSet.insert(S->getTrueValue());
      continue;
    }

    if (auto E = dyn_cast<ExtractElementInst>(V)) {
      WorkSet.insert(E->getVectorOperand());
      continue;
    }

    LLVM_DEBUG(dbgs() << "  dropped\n");
  }

  LLVM_DEBUG(dbgs() << "  is not IA\n");
  return false;
}

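// Estimate per-function costs by scanning every instruction of \p F once.
// Memory instructions are weighted by their access size in dwords, and the
// previously computed totals of known callees are folded in; because the
// pass runs bottom-up over call graph SCCs, callees are normally visited
// before their callers.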
AMDGPUPerfHintAnalysis::FuncInfo *AMDGPUPerfHint::visit(const Function &F) {
  AMDGPUPerfHintAnalysis::FuncInfo &FI = FIM[&F];

  LLVM_DEBUG(dbgs() << "[AMDGPUPerfHint] process " << F.getName() << '\n');

  for (auto &B : F) {
    LastAccess = MemAccessInfo();
    for (auto &I : B) {
      if (const Type *Ty = getMemoryInstrPtrAndType(&I).second) {
        unsigned Size = divideCeil(Ty->getPrimitiveSizeInBits(), 32);
        if (isIndirectAccess(&I))
          FI.IAMInstCost += Size;
        if (isLargeStride(&I))
          FI.LSMInstCost += Size;
        FI.MemInstCost += Size;
        FI.InstCost += Size;
        continue;
      }
      if (auto *CB = dyn_cast<CallBase>(&I)) {
        Function *Callee = CB->getCalledFunction();
        if (!Callee || Callee->isDeclaration()) {
          ++FI.InstCost;
          continue;
        }
        if (&F == Callee) // Handle immediate recursion
          continue;

        auto Loc = FIM.find(Callee);
        if (Loc == FIM.end())
          continue;

        FI.MemInstCost += Loc->second.MemInstCost;
        FI.InstCost += Loc->second.InstCost;
        FI.IAMInstCost += Loc->second.IAMInstCost;
        FI.LSMInstCost += Loc->second.LSMInstCost;
      } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
        TargetLoweringBase::AddrMode AM;
        auto *Ptr = GetPointerBaseWithConstantOffset(GEP, AM.BaseOffs, *DL);
        AM.BaseGV = dyn_cast_or_null<GlobalValue>(const_cast<Value *>(Ptr));
        AM.HasBaseReg = !AM.BaseGV;
        if (TLI->isLegalAddressingMode(*DL, AM, GEP->getResultElementType(),
                                       GEP->getPointerAddressSpace()))
          // Offset will likely be folded into load or store
          continue;
        ++FI.InstCost;
      } else {
        ++FI.InstCost;
      }
    }
  }

  return &FI;
}

bool AMDGPUPerfHint::runOnFunction(Function &F) {
  const Module &M = *F.getParent();
  DL = &M.getDataLayout();

  if (F.hasFnAttribute("amdgpu-wave-limiter") &&
      F.hasFnAttribute("amdgpu-memory-bound"))
    return false;

  const AMDGPUPerfHintAnalysis::FuncInfo *Info = visit(F);

  LLVM_DEBUG(dbgs() << F.getName() << " MemInst cost: " << Info->MemInstCost
                    << '\n'
                    << " IAMInst cost: " << Info->IAMInstCost << '\n'
                    << " LSMInst cost: " << Info->LSMInstCost << '\n'
                    << " TotalInst cost: " << Info->InstCost << '\n');

  if (isMemBound(*Info)) {
    LLVM_DEBUG(dbgs() << F.getName() << " is memory bound\n");
    NumMemBound++;
    F.addFnAttr("amdgpu-memory-bound", "true");
  }

  if (AMDGPU::isEntryFunctionCC(F.getCallingConv()) && needLimitWave(*Info)) {
    LLVM_DEBUG(dbgs() << F.getName() << " needs limit wave\n");
    NumLimitWave++;
    F.addFnAttr("amdgpu-wave-limiter", "true");
  }

  return true;
}

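// Both predicates below compare a percentage of the total instruction cost
// against a threshold (50% by default). needLimitWave additionally weights
// indirect and large-stride accesses (1000x by default), so, for example, a
// kernel with InstCost = 100 containing a single dword indirect access
// (MemInstCost = 1, IAMInstCost = 1) scores (1 + 1 * 1000) * 100 / 100 =
// 1001 > 50 and is flagged for wave limiting.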
bool AMDGPUPerfHint::isMemBound(const AMDGPUPerfHintAnalysis::FuncInfo &FI) {
  return FI.MemInstCost * 100 / FI.InstCost > MemBoundThresh;
}

bool AMDGPUPerfHint::needLimitWave(const AMDGPUPerfHintAnalysis::FuncInfo &FI) {
  return ((FI.MemInstCost + FI.IAMInstCost * IAWeight +
           FI.LSMInstCost * LSWeight) * 100 / FI.InstCost) > LimitWaveThresh;
}

bool AMDGPUPerfHint::isGlobalAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType())) {
    unsigned As = PT->getAddressSpace();
    // Flat likely points to global too.
    return As == AMDGPUAS::GLOBAL_ADDRESS || As == AMDGPUAS::FLAT_ADDRESS;
  }
  return false;
}

bool AMDGPUPerfHint::isLocalAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType()))
    return PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
  return false;
}

bool AMDGPUPerfHint::isLargeStride(const Instruction *Inst) {
  LLVM_DEBUG(dbgs() << "[isLargeStride] " << *Inst << '\n');

  MemAccessInfo MAI = makeMemAccessInfo(const_cast<Instruction *>(Inst));
  bool IsLargeStride = MAI.isLargeStride(LastAccess);
  if (MAI.Base)
    LastAccess = std::move(MAI);

  return IsLargeStride;
}

AMDGPUPerfHint::MemAccessInfo
AMDGPUPerfHint::makeMemAccessInfo(Instruction *Inst) const {
  MemAccessInfo MAI;
  const Value *MO = getMemoryInstrPtrAndType(Inst).first;

  LLVM_DEBUG(dbgs() << "[isLargeStride] MO: " << *MO << '\n');
  // Do not treat local-addr memory access as large stride.
  if (isLocalAddr(MO))
    return MAI;

  MAI.V = MO;
  MAI.Base = GetPointerBaseWithConstantOffset(MO, MAI.Offset, *DL);
  return MAI;
}

bool AMDGPUPerfHint::isConstantAddr(const Value *V) const {
  if (auto PT = dyn_cast<PointerType>(V->getType())) {
    unsigned As = PT->getAddressSpace();
    return As == AMDGPUAS::CONSTANT_ADDRESS ||
           As == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
  }
  return false;
}

bool AMDGPUPerfHint::MemAccessInfo::isLargeStride(
    MemAccessInfo &Reference) const {

  if (!Base || !Reference.Base || Base != Reference.Base)
    return false;

  uint64_t Diff = Offset > Reference.Offset ? Offset - Reference.Offset
                                            : Reference.Offset - Offset;
  bool Result = Diff > LargeStrideThresh;
  LLVM_DEBUG(dbgs() << "[isLargeStride compare]\n"
                    << print() << "<=>\n"
                    << Reference.print() << "Result:" << Result << '\n');
  return Result;
}
} // namespace

bool AMDGPUPerfHintAnalysis::runOnSCC(CallGraphSCC &SCC) {
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  const TargetMachine &TM = TPC->getTM<TargetMachine>();

  bool Changed = false;
  for (CallGraphNode *I : SCC) {
    Function *F = I->getFunction();
    if (!F || F->isDeclaration())
      continue;

    const TargetSubtargetInfo *ST = TM.getSubtargetImpl(*F);
    AMDGPUPerfHint Analyzer(FIM, ST->getTargetLowering());

    if (Analyzer.runOnFunction(*F))
      Changed = true;
  }

  return Changed;
}

bool AMDGPUPerfHintAnalysis::isMemoryBound(const Function *F) const {
  auto FI = FIM.find(F);
  if (FI == FIM.end())
    return false;

  return AMDGPUPerfHint::isMemBound(FI->second);
}

bool AMDGPUPerfHintAnalysis::needsWaveLimiter(const Function *F) const {
  auto FI = FIM.find(F);
  if (FI == FIM.end())
    return false;

  return AMDGPUPerfHint::needLimitWave(FI->second);
}