//===- LoopLoadElimination.cpp - Loop Load Elimination Pass ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a loop-aware load elimination pass.
//
// It uses LoopAccessAnalysis to identify loop-carried dependences with a
// distance of one between stores and loads. These form the candidates for the
// transformation. The source value of each store is then propagated to the
// users of the corresponding load. This makes the load dead.
//
// The pass can also version the loop and add memchecks in order to prove that
// may-aliasing stores can't change the value in memory before it's read by the
// load.
//
//===----------------------------------------------------------------------===//
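//
// As a concrete illustration (a C-level sketch; the names A, B, C and N are
// illustrative, not taken from this file), consider:
//
//   for (unsigned i = 0; i < N; i++) {
//     B[i] = A[i] + 1; // load of A[i]
//     A[i + 1] = C[i]; // store one element ahead of the load
//   }
//
// The value stored to A[i+1] in iteration i is exactly the value loaded from
// A[i] in iteration i+1, so the load can be replaced by a PHI that forwards
// the stored value across the backedge, with A[0] loaded once in the
// preheader.
//
//===----------------------------------------------------------------------===//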

#include "llvm/Transforms/Scalar/LoopLoadElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <forward_list>
#include <set>
#include <tuple>
#include <utility>

using namespace llvm;

#define LLE_OPTION "loop-load-elim"
#define DEBUG_TYPE LLE_OPTION

static cl::opt<unsigned> CheckPerElim(
    "runtime-check-per-loop-load-elim", cl::Hidden,
    cl::desc("Max number of memchecks allowed per eliminated load on average"),
    cl::init(1));

static cl::opt<unsigned> LoadElimSCEVCheckThreshold(
    "loop-load-elimination-scev-check-threshold", cl::init(8), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed for Loop "
             "Load Elimination"));

STATISTIC(NumLoopLoadEliminated, "Number of loads eliminated by LLE");

namespace {

/// Represent a store-to-load forwarding candidate.
struct StoreToLoadForwardingCandidate {
  LoadInst *Load;
  StoreInst *Store;

  StoreToLoadForwardingCandidate(LoadInst *Load, StoreInst *Store)
      : Load(Load), Store(Store) {}

  /// Return true if the dependence from the store to the load has a
  /// distance of one. E.g. A[i+1] = A[i]
  bool isDependenceDistanceOfOne(PredicatedScalarEvolution &PSE,
                                 Loop *L) const {
    Value *LoadPtr = Load->getPointerOperand();
    Value *StorePtr = Store->getPointerOperand();
    Type *LoadPtrType = LoadPtr->getType();
    Type *LoadType = LoadPtrType->getPointerElementType();

    assert(LoadPtrType->getPointerAddressSpace() ==
               StorePtr->getType()->getPointerAddressSpace() &&
           LoadType == StorePtr->getType()->getPointerElementType() &&
           "Should be a known dependence");

    // Currently we only support accesses with unit stride.  FIXME: we should
    // be able to handle non-unit stride as well, as long as the stride is
    // equal to the dependence distance.
    if (getPtrStride(PSE, LoadPtr, L) != 1 ||
        getPtrStride(PSE, StorePtr, L) != 1)
      return false;

    auto &DL = Load->getParent()->getModule()->getDataLayout();
    unsigned TypeByteSize = DL.getTypeAllocSize(const_cast<Type *>(LoadType));

    auto *LoadPtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(LoadPtr));
    auto *StorePtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(StorePtr));

    // We don't need to check non-wrapping here because forward/backward
    // dependence wouldn't be valid if these weren't monotonic accesses.
    auto *Dist = cast<SCEVConstant>(
        PSE.getSE()->getMinusSCEV(StorePtrSCEV, LoadPtrSCEV));
    const APInt &Val = Dist->getAPInt();
    return Val == TypeByteSize;
  }

  Value *getLoadPtr() const { return Load->getPointerOperand(); }

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const StoreToLoadForwardingCandidate &Cand) {
    OS << *Cand.Store << " -->\n";
    OS.indent(2) << *Cand.Load << "\n";
    return OS;
  }
#endif
};

} // end anonymous namespace
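
// Worked example for isDependenceDistanceOfOne above (illustrative, assuming
// unit-stride i32 accesses): for A[i+1] = A[i], the load pointer is the SCEV
// {A,+,4} and the store pointer is {A+4,+,4}.  Their difference folds to the
// constant 4, which equals TypeByteSize, so the dependence distance is
// exactly one element.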

/// Check if the store dominates all latches; if it does then, as long as
/// there is no intervening store, the stored value will be loaded in the next
/// iteration.
static bool doesStoreDominateAllLatches(BasicBlock *StoreBlock, Loop *L,
                                        DominatorTree *DT) {
  SmallVector<BasicBlock *, 8> Latches;
  L->getLoopLatches(Latches);
  return llvm::all_of(Latches, [&](const BasicBlock *Latch) {
    return DT->dominates(StoreBlock, Latch);
  });
}

/// Return true if the load is not executed on all paths in the loop.
static bool isLoadConditional(LoadInst *Load, Loop *L) {
  return Load->getParent() != L->getHeader();
}

namespace {

/// The per-loop class that does most of the work.
class LoadEliminationForLoop {
public:
  LoadEliminationForLoop(Loop *L, LoopInfo *LI, const LoopAccessInfo &LAI,
                         DominatorTree *DT, BlockFrequencyInfo *BFI,
                         ProfileSummaryInfo *PSI)
      : L(L), LI(LI), LAI(LAI), DT(DT), BFI(BFI), PSI(PSI), PSE(LAI.getPSE()) {}

  /// Look through the loop-carried and loop-independent dependences in
  /// this loop and find store->load dependences.
  ///
  /// Note that no candidate is returned if LAA has failed to analyze the loop
  /// (e.g. if it's not bottom-tested, contains volatile memops, etc.)
  std::forward_list<StoreToLoadForwardingCandidate>
  findStoreToLoadDependences(const LoopAccessInfo &LAI) {
    std::forward_list<StoreToLoadForwardingCandidate> Candidates;

    const auto *Deps = LAI.getDepChecker().getDependences();
    if (!Deps)
      return Candidates;

    // Find store->load dependences (i.e. true dependences).  Both lexically
    // forward and backward dependences qualify.  Disqualify loads that have
    // other unknown dependences.

    SmallPtrSet<Instruction *, 4> LoadsWithUnknownDependence;

    for (const auto &Dep : *Deps) {
      Instruction *Source = Dep.getSource(LAI);
      Instruction *Destination = Dep.getDestination(LAI);

      if (Dep.Type == MemoryDepChecker::Dependence::Unknown) {
        if (isa<LoadInst>(Source))
          LoadsWithUnknownDependence.insert(Source);
        if (isa<LoadInst>(Destination))
          LoadsWithUnknownDependence.insert(Destination);
        continue;
      }

      if (Dep.isBackward())
        // Note that the designations source and destination follow the
        // program order, i.e. source is always first.  (The direction is
        // given by the DepType.)
        std::swap(Source, Destination);
      else
        assert(Dep.isForward() && "Needs to be a forward dependence");

      auto *Store = dyn_cast<StoreInst>(Source);
      if (!Store)
        continue;
      auto *Load = dyn_cast<LoadInst>(Destination);
      if (!Load)
        continue;

      // Only propagate the value if they are of the same type.
      if (Store->getPointerOperandType() != Load->getPointerOperandType())
        continue;

      Candidates.emplace_front(Load, Store);
    }

    if (!LoadsWithUnknownDependence.empty())
      Candidates.remove_if([&](const StoreToLoadForwardingCandidate &C) {
        return LoadsWithUnknownDependence.count(C.Load);
      });

    return Candidates;
  }

  /// Return the index of the instruction according to program order.
  unsigned getInstrIndex(Instruction *Inst) {
    auto I = InstOrder.find(Inst);
    assert(I != InstOrder.end() && "No index for instruction");
    return I->second;
  }

  /// If a load has multiple candidates associated (i.e. different
  /// stores), it means that it could be forwarding from multiple stores
  /// depending on control flow.  Remove these candidates.
  ///
  /// Here, we rely on LAA to include the relevant loop-independent
  /// dependences.  LAA is known to omit these in the very simple case when
  /// the read and the write within an alias set always take place using the
  /// *same* pointer.
  ///
  /// However, we know that this is not the case here, i.e. we can rely on LAA
  /// to provide us with loop-independent dependences for the cases we're
  /// interested in.  Consider the case for example where a loop-independent
  /// dependence S1->S2 invalidates the forwarding S3->S2.
  ///
  ///   A[i]   = ...   (S1)
  ///   ...    = A[i]  (S2)
  ///   A[i+1] = ...   (S3)
  ///
  /// LAA will perform dependence analysis here because there are two
  /// *different* pointers involved in the same alias set (&A[i] and &A[i+1]).
  void removeDependencesFromMultipleStores(
      std::forward_list<StoreToLoadForwardingCandidate> &Candidates) {
    // If Store is nullptr it means that we have multiple stores forwarding to
    // this load.
    using LoadToSingleCandT =
        DenseMap<LoadInst *, const StoreToLoadForwardingCandidate *>;
    LoadToSingleCandT LoadToSingleCand;

    for (const auto &Cand : Candidates) {
      bool NewElt;
      LoadToSingleCandT::iterator Iter;

      std::tie(Iter, NewElt) =
          LoadToSingleCand.insert(std::make_pair(Cand.Load, &Cand));
      if (!NewElt) {
        const StoreToLoadForwardingCandidate *&OtherCand = Iter->second;
        // Already multiple stores forward to this load.
        if (OtherCand == nullptr)
          continue;

        // Handle the very basic case when the two stores are in the same
        // block so deciding which one forwards is easy.  The later one
        // forwards as long as they both have a dependence distance of one to
        // the load.
        if (Cand.Store->getParent() == OtherCand->Store->getParent() &&
            Cand.isDependenceDistanceOfOne(PSE, L) &&
            OtherCand->isDependenceDistanceOfOne(PSE, L)) {
          // They are in the same block; the later one will forward to the
          // load.
          if (getInstrIndex(OtherCand->Store) < getInstrIndex(Cand.Store))
            OtherCand = &Cand;
        } else
          OtherCand = nullptr;
      }
    }

    Candidates.remove_if([&](const StoreToLoadForwardingCandidate &Cand) {
      if (LoadToSingleCand[Cand.Load] != &Cand) {
        LLVM_DEBUG(
            dbgs() << "Removing from candidates: \n"
                   << Cand
                   << " The load may have multiple stores forwarding to "
                   << "it\n");
        return true;
      }
      return false;
    });
  }

  /// Given two pointer operations, identified by their RuntimePointerChecking
  /// indices, return true if they require an alias check.
  ///
  /// We need a check if one is a pointer for a candidate load and the other is
  /// a pointer for a possibly intervening store.
  bool needsChecking(unsigned PtrIdx1, unsigned PtrIdx2,
                     const SmallPtrSet<Value *, 4> &PtrsWrittenOnFwdingPath,
                     const std::set<Value *> &CandLoadPtrs) {
    Value *Ptr1 =
        LAI.getRuntimePointerChecking()->getPointerInfo(PtrIdx1).PointerValue;
    Value *Ptr2 =
        LAI.getRuntimePointerChecking()->getPointerInfo(PtrIdx2).PointerValue;
    return ((PtrsWrittenOnFwdingPath.count(Ptr1) && CandLoadPtrs.count(Ptr2)) ||
            (PtrsWrittenOnFwdingPath.count(Ptr2) && CandLoadPtrs.count(Ptr1)));
  }

  /// Return pointers that are possibly written to on the path from a
  /// forwarding store to a load.
  ///
  /// These pointers need to be alias-checked against the forwarding
  /// candidates.
  SmallPtrSet<Value *, 4> findPointersWrittenOnForwardingPath(
      const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {
    // From FirstStore to LastLoad neither of the elimination candidate loads
    // should overlap with any of the stores.
    //
    // E.g.:
    //
    // st1 C[i]
    // ld1 B[i] <-------,
    // ld0 A[i] <----,  |              * LastLoad
    // ...           |  |
    // st2 E[i]      |  |
    // st3 B[i+1] -- | -'              * FirstStore
    // st0 A[i+1] ---'
    // st4 D[i]
    //
    // st0 forwards to ld0 if the accesses in st4 and st1 don't overlap with
    // ld0.

    LoadInst *LastLoad =
        std::max_element(Candidates.begin(), Candidates.end(),
                         [&](const StoreToLoadForwardingCandidate &A,
                             const StoreToLoadForwardingCandidate &B) {
                           return getInstrIndex(A.Load) < getInstrIndex(B.Load);
                         })
            ->Load;
    StoreInst *FirstStore =
        std::min_element(Candidates.begin(), Candidates.end(),
                         [&](const StoreToLoadForwardingCandidate &A,
                             const StoreToLoadForwardingCandidate &B) {
                           return getInstrIndex(A.Store) <
                                  getInstrIndex(B.Store);
                         })
            ->Store;

    // We're looking for stores after the first forwarding store until the end
    // of the loop, then from the beginning of the loop until the last
    // forwarded-to load.  Collect the pointers of these stores.
    SmallPtrSet<Value *, 4> PtrsWrittenOnFwdingPath;

    auto InsertStorePtr = [&](Instruction *I) {
      if (auto *S = dyn_cast<StoreInst>(I))
        PtrsWrittenOnFwdingPath.insert(S->getPointerOperand());
    };
    const auto &MemInstrs = LAI.getDepChecker().getMemoryInstructions();
    std::for_each(MemInstrs.begin() + getInstrIndex(FirstStore) + 1,
                  MemInstrs.end(), InsertStorePtr);
    std::for_each(MemInstrs.begin(), &MemInstrs[getInstrIndex(LastLoad)],
                  InsertStorePtr);

    return PtrsWrittenOnFwdingPath;
  }

  /// Determine the pointer alias checks to prove that there are no
  /// intervening stores.
  SmallVector<RuntimePointerChecking::PointerCheck, 4> collectMemchecks(
      const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {

    SmallPtrSet<Value *, 4> PtrsWrittenOnFwdingPath =
        findPointersWrittenOnForwardingPath(Candidates);

    // Collect the pointers of the candidate loads.
    // FIXME: SmallPtrSet does not work with std::inserter.
    std::set<Value *> CandLoadPtrs;
    transform(Candidates, std::inserter(CandLoadPtrs, CandLoadPtrs.begin()),
              std::mem_fn(&StoreToLoadForwardingCandidate::getLoadPtr));

    const auto &AllChecks = LAI.getRuntimePointerChecking()->getChecks();
    SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks;

    copy_if(AllChecks, std::back_inserter(Checks),
            [&](const RuntimePointerChecking::PointerCheck &Check) {
              for (auto PtrIdx1 : Check.first->Members)
                for (auto PtrIdx2 : Check.second->Members)
                  if (needsChecking(PtrIdx1, PtrIdx2, PtrsWrittenOnFwdingPath,
                                    CandLoadPtrs))
                    return true;
              return false;
            });

    LLVM_DEBUG(dbgs() << "\nPointer Checks (count: " << Checks.size()
                      << "):\n");
    LLVM_DEBUG(LAI.getRuntimePointerChecking()->printChecks(dbgs(), Checks));

    return Checks;
  }
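
  // The checks returned by collectMemchecks are handed to LoopVersioning in
  // processLoop below, where they are emitted as pairwise overlap tests on
  // the [start, end) byte ranges of the two pointer groups; if any pair may
  // overlap at run time, control transfers to the unmodified copy of the
  // loop.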

  /// Perform the transformation for a candidate.
  void
  propagateStoredValueToLoadUsers(const StoreToLoadForwardingCandidate &Cand,
                                  SCEVExpander &SEE) {
    // loop:
    //      %x = load %gep_i
    //         = ... %x
    //      store %y, %gep_i_plus_1
    //
    // =>
    //
    // ph:
    //      %x.initial = load %gep_0
    // loop:
    //      %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
    //      %x = load %gep_i            <---- now dead
    //         = ... %x.storeforward
    //      store %y, %gep_i_plus_1

    Value *Ptr = Cand.Load->getPointerOperand();
    auto *PtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(Ptr));
    auto *PH = L->getLoopPreheader();
    Value *InitialPtr = SEE.expandCodeFor(PtrSCEV->getStart(), Ptr->getType(),
                                          PH->getTerminator());
    Value *Initial = new LoadInst(
        Cand.Load->getType(), InitialPtr, "load_initial",
        /* isVolatile */ false, MaybeAlign(Cand.Load->getAlignment()),
        PH->getTerminator());

    PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
                                   &L->getHeader()->front());
    PHI->addIncoming(Initial, PH);
    PHI->addIncoming(Cand.Store->getOperand(0), L->getLoopLatch());

    Cand.Load->replaceAllUsesWith(PHI);
  }

  /// Top-level driver for each loop: find store->load forwarding
  /// candidates, add run-time checks and perform the transformation.
  bool processLoop() {
    LLVM_DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName()
                      << "\" checking " << *L << "\n");

    // Look for store-to-load forwarding cases across the
    // backedge. E.g.:
    //
    // loop:
    //      %x = load %gep_i
    //         = ... %x
    //      store %y, %gep_i_plus_1
    //
    // =>
    //
    // ph:
    //      %x.initial = load %gep_0
    // loop:
    //      %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
    //      %x = load %gep_i            <---- now dead
    //         = ... %x.storeforward
    //      store %y, %gep_i_plus_1

    // First start with store->load dependences.
    auto StoreToLoadDependences = findStoreToLoadDependences(LAI);
    if (StoreToLoadDependences.empty())
      return false;

    // Generate an index for each load and store according to the original
    // program order.  This will be used later.
    InstOrder = LAI.getDepChecker().generateInstructionOrderMap();

    // To keep things simple for now, remove those cases where the load is
    // potentially fed by multiple stores.
    removeDependencesFromMultipleStores(StoreToLoadDependences);
    if (StoreToLoadDependences.empty())
      return false;

    // Filter the candidates further.
    SmallVector<StoreToLoadForwardingCandidate, 4> Candidates;
    unsigned NumForwarding = 0;
    for (const StoreToLoadForwardingCandidate &Cand : StoreToLoadDependences) {
      LLVM_DEBUG(dbgs() << "Candidate " << Cand);

      // Make sure that the stored value is available everywhere in the loop
      // in the next iteration.
      if (!doesStoreDominateAllLatches(Cand.Store->getParent(), L, DT))
        continue;

      // If the load is conditional we can't hoist its 0-iteration instance to
      // the preheader because that would make it unconditional.  Thus we
      // would access a memory location that the original loop did not access.
      if (isLoadConditional(Cand.Load, L))
        continue;

      // Check whether the SCEV difference is the same as the induction step,
      // thus we load the value in the next iteration.
      if (!Cand.isDependenceDistanceOfOne(PSE, L))
        continue;

      ++NumForwarding;
      LLVM_DEBUG(
          dbgs()
          << NumForwarding
          << ". Valid store-to-load forwarding across the loop backedge\n");
      Candidates.push_back(Cand);
    }
    if (Candidates.empty())
      return false;

    // Check intervening may-alias stores.  These need runtime checks for
    // alias disambiguation.
    SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks =
        collectMemchecks(Candidates);

    // Too many checks are likely to outweigh the benefits of forwarding.
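    // E.g. with the default budget of one memcheck per eliminated load on
    // average (-runtime-check-per-loop-load-elim=1), forwarding two loads
    // justifies at most two runtime checks (illustrative reading of the
    // heuristic below).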
    if (Checks.size() > Candidates.size() * CheckPerElim) {
      LLVM_DEBUG(dbgs() << "Too many run-time checks needed.\n");
      return false;
    }

    if (LAI.getPSE().getUnionPredicate().getComplexity() >
        LoadElimSCEVCheckThreshold) {
      LLVM_DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n");
      return false;
    }

    if (!Checks.empty() || !LAI.getPSE().getUnionPredicate().isAlwaysTrue()) {
      if (LAI.hasConvergentOp()) {
        LLVM_DEBUG(dbgs() << "Versioning is needed but not allowed with "
                             "convergent calls\n");
        return false;
      }

      auto *HeaderBB = L->getHeader();
      auto *F = HeaderBB->getParent();
      bool OptForSize = F->hasOptSize() ||
                        llvm::shouldOptimizeForSize(HeaderBB, PSI, BFI);
      if (OptForSize) {
        LLVM_DEBUG(
            dbgs() << "Versioning is needed but not allowed when optimizing "
                      "for size.\n");
        return false;
      }

      if (!L->isLoopSimplifyForm()) {
        LLVM_DEBUG(dbgs() << "Loop is not in loop-simplify form.\n");
        return false;
      }

      // Point of no return: start the transformation.  First, version the
      // loop if necessary.

      LoopVersioning LV(LAI, L, LI, DT, PSE.getSE(), false);
      LV.setAliasChecks(std::move(Checks));
      LV.setSCEVChecks(LAI.getPSE().getUnionPredicate());
      LV.versionLoop();
    }

    // Next, propagate the value stored by the store to the users of the load.
    // Also for the first iteration, generate the initial value of the load.
    SCEVExpander SEE(*PSE.getSE(), L->getHeader()->getModule()->getDataLayout(),
                     "storeforward");
    for (const auto &Cand : Candidates)
      propagateStoredValueToLoadUsers(Cand, SEE);
    NumLoopLoadEliminated += NumForwarding;

    return true;
  }

private:
  Loop *L;

  /// Maps the load/store instructions to their index according to
  /// program order.
  DenseMap<Instruction *, unsigned> InstOrder;

  // Analyses used.
  LoopInfo *LI;
  const LoopAccessInfo &LAI;
  DominatorTree *DT;
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;
  PredicatedScalarEvolution PSE;
};

} // end anonymous namespace

static bool
eliminateLoadsAcrossLoops(Function &F, LoopInfo &LI, DominatorTree &DT,
                          BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
                          function_ref<const LoopAccessInfo &(Loop &)> GetLAI) {
  // Build up a worklist of inner-loops to transform to avoid iterator
  // invalidation.
  // FIXME: This logic comes from other passes that actually change the loop
  // nest structure.  It isn't clear this is necessary (or useful) for a pass
  // which merely optimizes the use of loads in a loop.
  SmallVector<Loop *, 8> Worklist;

  for (Loop *TopLevelLoop : LI)
    for (Loop *L : depth_first(TopLevelLoop))
      // We only handle inner-most loops.
      if (L->empty())
        Worklist.push_back(L);

  // Now walk the identified inner loops.
  bool Changed = false;
  for (Loop *L : Worklist) {
    // The actual work is performed by LoadEliminationForLoop.
    LoadEliminationForLoop LEL(L, &LI, GetLAI(*L), &DT, BFI, PSI);
    Changed |= LEL.processLoop();
  }
  return Changed;
}

namespace {

/// The pass.  Most of the work is delegated to the per-loop
/// LoadEliminationForLoop class.
class LoopLoadElimination : public FunctionPass {
public:
  static char ID;

  LoopLoadElimination() : FunctionPass(ID) {
    initializeLoopLoadEliminationPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto &LAA = getAnalysis<LoopAccessLegacyAnalysis>();
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    auto *BFI = (PSI && PSI->hasProfileSummary())
                    ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
                    : nullptr;

    // Process each loop nest in the function.
    return eliminateLoadsAcrossLoops(
        F, LI, DT, BFI, PSI,
        [&LAA](Loop &L) -> const LoopAccessInfo & { return LAA.getInfo(&L); });
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequiredID(LoopSimplifyID);
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char LoopLoadElimination::ID;

static const char LLE_name[] = "Loop Load Elimination";

INITIALIZE_PASS_BEGIN(LoopLoadElimination, LLE_OPTION, LLE_name, false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_END(LoopLoadElimination, LLE_OPTION, LLE_name, false, false)

FunctionPass *llvm::createLoopLoadEliminationPass() {
  return new LoopLoadElimination();
}

PreservedAnalyses LoopLoadEliminationPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &MAM = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
  auto *PSI = MAM.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  auto *BFI = (PSI && PSI->hasProfileSummary())
                  ? &AM.getResult<BlockFrequencyAnalysis>(F)
                  : nullptr;
  MemorySSA *MSSA = EnableMSSALoopDependency
                        ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA()
                        : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  bool Changed = eliminateLoadsAcrossLoops(
      F, LI, DT, BFI, PSI, [&](Loop &L) -> const LoopAccessInfo & {
        LoopStandardAnalysisResults AR = {AA, AC, DT, LI, SE, TLI, TTI, MSSA};
        return LAM.getResult<LoopAccessAnalysis>(L, AR);
      });

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  return PA;
}