//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif
/// Enables MemorySSA as a dependency for loop passes in the legacy pass
/// manager.
cl::opt<bool> llvm::EnableMSSALoopDependency(
    "enable-mssa-loop-dependency", cl::Hidden, cl::init(true),
    cl::desc("Enable MemorySSA dependency for loop pass manager"));

static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

namespace llvm {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;

  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

} // end namespace llvm

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a densemap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a MemoryLocation for a fence inst, and it is
      // unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
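  //
  // Illustrative example (for orientation only, not normative): given
  //   %a = load atomic i32, i32* %p monotonic, align 4
  //   %b = load atomic i32, i32* %p monotonic, align 4
  // %b may be hoisted above %a. It may not be hoisted above an `acquire`
  // load, and a `seq_cst` load may not be hoisted above other loads at all.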
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}

namespace {

struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};

} // end anonymous namespace

// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
template <typename AliasAnalysisType>
static ClobberAlias
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  const auto *UseCall = dyn_cast<CallBase>(UseInst);
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      if (UseCall)
        return {false, NoAlias};
      AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
      return {AR != NoAlias, AR};
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
      return {false, NoAlias};
    case Intrinsic::dbg_addr:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (UseCall) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
    AR = isMustSet(I) ? MustAlias : MayAlias;
    return {isModOrRefSet(I), AR};
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? MustAlias : MayAlias;
  return {isModSet(I), AR};
}

template <typename AliasAnalysisType>
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = MayAlias;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           BatchAAResults &AA) {
  Instruction *Inst = MD->getMemoryInst();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_end:
      return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
    default:
      return false;
    }
  }
  return false;
}

template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  return isa<LoadInst>(I) && (I->hasMetadata(LLVMContext::MD_invariant_load) ||
                              AA.pointsToConstantMemory(MemoryLocation(
                                  cast<LoadInst>(I)->getPointerOperand())));
}

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc The MemoryLocation for Start.
/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query The UpwardsMemoryQuery we used for our search.
/// \param AA The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.

template <typename AliasAnalysisType>
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            ClobberAlias CA =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
            if (CA.IsClobber) {
              FoundClobber = true;
              // Not used: CA.AR;
            }
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));
      Worklist.append(
          upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second},
                            MSSA.getDomTree()),
          upward_defs_end());
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
template <class AliasAnalysisType> class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysisType &AA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
    Optional<AliasResult> AR;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
    // it to 1. This will not do any alias() calls. It either returns in the
    // first iteration in the loop below, or is set back to 0 if all def chains
    // are free of MemoryDefs.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false, MayAlias};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true, MustAlias};

        if (!--*UpwardWalkLimit)
          return {Current, true, MayAlias};

        ClobberAlias CA =
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
        if (CA.IsClobber)
          return {MD, true, CA.AR};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, MayAlias};
  }

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefs = make_range(
        upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT), upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() {}
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;
        assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  AliasAnalysisType *getAA() { return &AA; }
  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
                            unsigned &UpWalkLimit) {
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }
};

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
  ClobberWalker<AliasAnalysisType> Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
      : Walker(*M, *A, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              unsigned &);
  // The third argument (bool) defines whether the clobber search should skip
  // the original queried access. If true, there will be a follow-up query
  // searching for a clobber access past "self". Note that the Optimized access
  // is not updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
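  // For reference (see the walker classes below): CachingWalker passes
  // SkipSelf = false, and SkipSelfWalker passes SkipSelf = true.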
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
};

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
template <class AliasAnalysisType>
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

template <class AliasAnalysisType>
class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      (void) ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr), NextID(0) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA, and it can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
               BatchAAResults *BAA, DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given MemoryLocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which DenseMap
    // does.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
    Optional<AliasResult> AR;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker<BatchAAResults> *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};

} // end namespace llvm

/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and last ended at. This is because we only want to check the
/// things that changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data).
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  /// If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees. To get the optimal number of queries would
      // be to make lowerbound and lastkill a per-loc stack, and pop it until
      // the top of that stack dominates us. This does not seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
        // We are guaranteed to find it or something is wrong
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      // If the lifetime of the pointer ends at this instruction, it's live on
      // entry.
      if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
        // Reset UpperBound to liveOnEntryDef's place in the stack
        UpperBound = 0;
        FoundClobberResult = true;
        LocInfo.AR = MustAlias;
        break;
      }
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // Note: Phis always have AliasResult AR set to MayAlias ATM.

    // At the end of this loop, UpperBound is either a clobber, or the lower
    // bound. PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed now by where we got to
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}

/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}

void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhi's should go
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}

void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // We maintain lists of memory accesses per-block, trading memory for time. We
  // could just look up the memory access for every possible instruction in the
  // stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
      if (!MUD)
        continue;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
  OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
1559 for (auto &BB : F) 1560 if (!Visited.count(&BB)) 1561 markUnreachableAsLiveOnEntry(&BB); 1562 } 1563 1564 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); } 1565 1566 MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() { 1567 if (Walker) 1568 return Walker.get(); 1569 1570 if (!WalkerBase) 1571 WalkerBase = 1572 std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT); 1573 1574 Walker = 1575 std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get()); 1576 return Walker.get(); 1577 } 1578 1579 MemorySSAWalker *MemorySSA::getSkipSelfWalker() { 1580 if (SkipWalker) 1581 return SkipWalker.get(); 1582 1583 if (!WalkerBase) 1584 WalkerBase = 1585 std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT); 1586 1587 SkipWalker = 1588 std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get()); 1589 return SkipWalker.get(); 1590 } 1591 1592 1593 // This is a helper function used by the creation routines. It places NewAccess 1594 // into the access and defs lists for a given basic block, at the given 1595 // insertion point. 1596 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess, 1597 const BasicBlock *BB, 1598 InsertionPlace Point) { 1599 auto *Accesses = getOrCreateAccessList(BB); 1600 if (Point == Beginning) { 1601 // If it's a phi node, it goes first, otherwise, it goes after any phi 1602 // nodes. 1603 if (isa<MemoryPhi>(NewAccess)) { 1604 Accesses->push_front(NewAccess); 1605 auto *Defs = getOrCreateDefsList(BB); 1606 Defs->push_front(*NewAccess); 1607 } else { 1608 auto AI = find_if_not( 1609 *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); 1610 Accesses->insert(AI, NewAccess); 1611 if (!isa<MemoryUse>(NewAccess)) { 1612 auto *Defs = getOrCreateDefsList(BB); 1613 auto DI = find_if_not( 1614 *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); 1615 Defs->insert(DI, *NewAccess); 1616 } 1617 } 1618 } else { 1619 Accesses->push_back(NewAccess); 1620 if (!isa<MemoryUse>(NewAccess)) { 1621 auto *Defs = getOrCreateDefsList(BB); 1622 Defs->push_back(*NewAccess); 1623 } 1624 } 1625 BlockNumberingValid.erase(BB); 1626 } 1627 1628 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB, 1629 AccessList::iterator InsertPt) { 1630 auto *Accesses = getWritableBlockAccesses(BB); 1631 bool WasEnd = InsertPt == Accesses->end(); 1632 Accesses->insert(AccessList::iterator(InsertPt), What); 1633 if (!isa<MemoryUse>(What)) { 1634 auto *Defs = getOrCreateDefsList(BB); 1635 // If we got asked to insert at the end, we have an easy job, just shove it 1636 // at the end. If we got asked to insert before an existing def, we also get 1637 // an iterator. If we got asked to insert before a use, we have to hunt for 1638 // the next def. 
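// Illustrative example (hypothetical block contents): given an access list
//   [MemoryPhi, MemoryUse(a), MemoryDef(b), MemoryUse(c)]
// and a defs list of [MemoryPhi, MemoryDef(b)], inserting a new MemoryDef
// before MemoryUse(c) finds no later def while hunting forward, so the new
// def is appended to the defs list, keeping both lists in a consistent
// relative order.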
1639 if (WasEnd) { 1640 Defs->push_back(*What); 1641 } else if (isa<MemoryDef>(InsertPt)) { 1642 Defs->insert(InsertPt->getDefsIterator(), *What); 1643 } else { 1644 while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt)) 1645 ++InsertPt; 1646 // Either we found a def, or we are inserting at the end 1647 if (InsertPt == Accesses->end()) 1648 Defs->push_back(*What); 1649 else 1650 Defs->insert(InsertPt->getDefsIterator(), *What); 1651 } 1652 } 1653 BlockNumberingValid.erase(BB); 1654 } 1655 1656 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) { 1657 // Keep it in the lookup tables, remove from the lists 1658 removeFromLists(What, false); 1659 1660 // Note that moving should implicitly invalidate the optimized state of a 1661 // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a 1662 // MemoryDef. 1663 if (auto *MD = dyn_cast<MemoryDef>(What)) 1664 MD->resetOptimized(); 1665 What->setBlock(BB); 1666 } 1667 1668 // Move What before Where in the IR. The end result is that What will belong to 1669 // the right lists and have the right Block set, but will not otherwise be 1670 // correct. It will not have the right defining access, and if it is a def, 1671 // things below it will not properly be updated. 1672 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB, 1673 AccessList::iterator Where) { 1674 prepareForMoveTo(What, BB); 1675 insertIntoListsBefore(What, BB, Where); 1676 } 1677 1678 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB, 1679 InsertionPlace Point) { 1680 if (isa<MemoryPhi>(What)) { 1681 assert(Point == Beginning && 1682 "Can only move a Phi at the beginning of the block"); 1683 // Update lookup table entry 1684 ValueToMemoryAccess.erase(What->getBlock()); 1685 bool Inserted = ValueToMemoryAccess.insert({BB, What}).second; 1686 (void)Inserted; 1687 assert(Inserted && "Cannot move a Phi to a block that already has one"); 1688 } 1689 1690 prepareForMoveTo(What, BB); 1691 insertIntoListsForBlock(What, BB, Point); 1692 } 1693 1694 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) { 1695 assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB"); 1696 MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++); 1697 // Phi's always are placed at the front of the block. 1698 insertIntoListsForBlock(Phi, BB, Beginning); 1699 ValueToMemoryAccess[BB] = Phi; 1700 return Phi; 1701 } 1702 1703 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I, 1704 MemoryAccess *Definition, 1705 const MemoryUseOrDef *Template, 1706 bool CreationMustSucceed) { 1707 assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI"); 1708 MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template); 1709 if (CreationMustSucceed) 1710 assert(NewAccess != nullptr && "Tried to create a memory access for a " 1711 "non-memory touching instruction"); 1712 if (NewAccess) 1713 NewAccess->setDefiningAccess(Definition); 1714 return NewAccess; 1715 } 1716 1717 // Return true if the instruction has ordering constraints. 1718 // Note specifically that this only considers stores and loads 1719 // because others are still considered ModRef by getModRefInfo. 
1720 static inline bool isOrdered(const Instruction *I) {
1721 if (auto *SI = dyn_cast<StoreInst>(I)) {
1722 if (!SI->isUnordered())
1723 return true;
1724 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1725 if (!LI->isUnordered())
1726 return true;
1727 }
1728 return false;
1729 }
1730
1731 /// Helper function to create new memory accesses
1732 template <typename AliasAnalysisType>
1733 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1734 AliasAnalysisType *AAP,
1735 const MemoryUseOrDef *Template) {
1736 // The assume intrinsic has a control dependency which we model by claiming
1737 // that it writes arbitrarily. Debuginfo intrinsics may be considered
1738 // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1739 // dependencies here.
1740 // FIXME: Replace this special casing with a more accurate modelling of
1741 // assume's control dependency.
1742 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1743 if (II->getIntrinsicID() == Intrinsic::assume)
1744 return nullptr;
1745
1746 // Using a nonstandard AA pipeline might leave us with unexpected modref
1747 // results for I, so add a check to not model instructions that may not read
1748 // from or write to memory. This is necessary for correctness.
1749 if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1750 return nullptr;
1751
1752 bool Def, Use;
1753 if (Template) {
1754 Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
1755 Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
1756 #if !defined(NDEBUG)
1757 ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1758 bool DefCheck, UseCheck;
1759 DefCheck = isModSet(ModRef) || isOrdered(I);
1760 UseCheck = isRefSet(ModRef);
1761 assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1762 #endif
1763 } else {
1764 // Find out what effect this instruction has on memory.
1765 ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1766 // The isOrdered check is used to ensure that volatiles end up as defs
1767 // (atomics end up as ModRef right now anyway). Until we separate the
1768 // ordering chain from the memory chain, this enables people to see at least
1769 // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1770 // will still give an answer that bypasses other volatile loads. TODO:
1771 // Separate memory aliasing and ordering into two different chains so that
1772 // we can precisely represent both "what memory will this read/write/is
1773 // clobbered by" and "what instructions can I move this past".
1774 Def = isModSet(ModRef) || isOrdered(I);
1775 Use = isRefSet(ModRef);
1776 }
1777
1778 // It's possible for an instruction to not modify memory at all. During
1779 // construction, we ignore such instructions.
1780 if (!Def && !Use)
1781 return nullptr;
1782
1783 MemoryUseOrDef *MUD;
1784 if (Def)
1785 MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1786 else
1787 MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1788 ValueToMemoryAccess[I] = MUD;
1789 return MUD;
1790 }
1791
1792 /// Returns true if \p Replacer dominates \p Replacee.
1793 bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1794 const MemoryAccess *Replacee) const {
1795 if (isa<MemoryUseOrDef>(Replacee))
1796 return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1797 const auto *MP = cast<MemoryPhi>(Replacee);
1798 // For a phi node, the use occurs in the predecessor block of the phi node.
1799 // Since we may occur multiple times in the phi node, we have to check each 1800 // operand to ensure Replacer dominates each operand where Replacee occurs. 1801 for (const Use &Arg : MP->operands()) { 1802 if (Arg.get() != Replacee && 1803 !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg))) 1804 return false; 1805 } 1806 return true; 1807 } 1808 1809 /// Properly remove \p MA from all of MemorySSA's lookup tables. 1810 void MemorySSA::removeFromLookups(MemoryAccess *MA) { 1811 assert(MA->use_empty() && 1812 "Trying to remove memory access that still has uses"); 1813 BlockNumbering.erase(MA); 1814 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) 1815 MUD->setDefiningAccess(nullptr); 1816 // Invalidate our walker's cache if necessary 1817 if (!isa<MemoryUse>(MA)) 1818 getWalker()->invalidateInfo(MA); 1819 1820 Value *MemoryInst; 1821 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) 1822 MemoryInst = MUD->getMemoryInst(); 1823 else 1824 MemoryInst = MA->getBlock(); 1825 1826 auto VMA = ValueToMemoryAccess.find(MemoryInst); 1827 if (VMA->second == MA) 1828 ValueToMemoryAccess.erase(VMA); 1829 } 1830 1831 /// Properly remove \p MA from all of MemorySSA's lists. 1832 /// 1833 /// Because of the way the intrusive list and use lists work, it is important to 1834 /// do removal in the right order. 1835 /// ShouldDelete defaults to true, and will cause the memory access to also be 1836 /// deleted, not just removed. 1837 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) { 1838 BasicBlock *BB = MA->getBlock(); 1839 // The access list owns the reference, so we erase it from the non-owning list 1840 // first. 1841 if (!isa<MemoryUse>(MA)) { 1842 auto DefsIt = PerBlockDefs.find(BB); 1843 std::unique_ptr<DefsList> &Defs = DefsIt->second; 1844 Defs->remove(*MA); 1845 if (Defs->empty()) 1846 PerBlockDefs.erase(DefsIt); 1847 } 1848 1849 // The erase call here will delete it. If we don't want it deleted, we call 1850 // remove instead. 1851 auto AccessIt = PerBlockAccesses.find(BB); 1852 std::unique_ptr<AccessList> &Accesses = AccessIt->second; 1853 if (ShouldDelete) 1854 Accesses->erase(MA); 1855 else 1856 Accesses->remove(MA); 1857 1858 if (Accesses->empty()) { 1859 PerBlockAccesses.erase(AccessIt); 1860 BlockNumberingValid.erase(BB); 1861 } 1862 } 1863 1864 void MemorySSA::print(raw_ostream &OS) const { 1865 MemorySSAAnnotatedWriter Writer(this); 1866 F.print(OS, &Writer); 1867 } 1868 1869 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1870 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); } 1871 #endif 1872 1873 void MemorySSA::verifyMemorySSA() const { 1874 verifyOrderingDominationAndDefUses(F); 1875 verifyDominationNumbers(F); 1876 verifyPrevDefInPhis(F); 1877 // Previously, the verification used to also verify that the clobberingAccess 1878 // cached by MemorySSA is the same as the clobberingAccess found at a later 1879 // query to AA. This does not hold true in general due to the current fragility 1880 // of BasicAA which has arbitrary caps on the things it analyzes before giving 1881 // up. As a result, transformations that are correct, will lead to BasicAA 1882 // returning different Alias answers before and after that transformation. 1883 // Invalidating MemorySSA is not an option, as the results in BasicAA can be so 1884 // random, in the worst case we'd need to rebuild MemorySSA from scratch after 1885 // every transformation, which defeats the purpose of using it. For such an 1886 // example, see test4 added in D51960. 
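// Editorial note: full verification walks every access in the function, so
// callers in this file guard it behind the VerifyMemorySSA flag, e.g.:
//   if (VerifyMemorySSA)
//     MSSA.verifyMemorySSA();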
1887 }
1888
1889 void MemorySSA::verifyPrevDefInPhis(Function &F) const {
1890 #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1891 for (const BasicBlock &BB : F) {
1892 if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
1893 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1894 auto *Pred = Phi->getIncomingBlock(I);
1895 auto *IncAcc = Phi->getIncomingValue(I);
1896 // If Pred has no unreachable predecessors, get last def looking at
1897 // IDoms. If, while walking IDoms, any of these has an unreachable
1898 // predecessor, then the incoming def can be any access.
1899 if (auto *DTNode = DT->getNode(Pred)) {
1900 while (DTNode) {
1901 if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
1902 auto *LastAcc = &*(--DefList->end());
1903 assert(LastAcc == IncAcc &&
1904 "Incorrect incoming access into phi.");
1905 break;
1906 }
1907 DTNode = DTNode->getIDom();
1908 }
1909 } else {
1910 // If Pred has unreachable predecessors, but has at least a Def, the
1911 // incoming access can be the last Def in Pred, or it could have been
1912 // optimized to LoE. After an update, though, the LoE may have been
1913 // replaced by another access, so IncAcc may be any access.
1914 // If Pred has unreachable predecessors and no Defs, the incoming access
1915 // should be LoE; however, after an update, it may be any access.
1916 }
1917 }
1918 }
1919 }
1920 #endif
1921 }
1922
1923 /// Verify that all of the blocks we believe to have valid domination numbers
1924 /// actually have valid domination numbers.
1925 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1926 #ifndef NDEBUG
1927 if (BlockNumberingValid.empty())
1928 return;
1929
1930 SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1931 for (const BasicBlock &BB : F) {
1932 if (!ValidBlocks.count(&BB))
1933 continue;
1934
1935 ValidBlocks.erase(&BB);
1936
1937 const AccessList *Accesses = getBlockAccesses(&BB);
1938 // It's correct to say an empty block has valid numbering.
1939 if (!Accesses)
1940 continue;
1941
1942 // Block numbering starts at 1.
1943 unsigned long LastNumber = 0;
1944 for (const MemoryAccess &MA : *Accesses) {
1945 auto ThisNumberIter = BlockNumbering.find(&MA);
1946 assert(ThisNumberIter != BlockNumbering.end() &&
1947 "MemoryAccess has no domination number in a valid block!");
1948
1949 unsigned long ThisNumber = ThisNumberIter->second;
1950 assert(ThisNumber > LastNumber &&
1951 "Domination numbers should be strictly increasing!");
1952 LastNumber = ThisNumber;
1953 }
1954 }
1955
1956 assert(ValidBlocks.empty() &&
1957 "All valid BasicBlocks should exist in F -- dangling pointers?");
1958 #endif
1959 }
1960
1961 /// Verify ordering: the order and existence of MemoryAccesses matches the
1962 /// order and existence of memory affecting instructions.
1963 /// Verify domination: each definition dominates all of its uses.
1964 /// Verify def-uses: the immediate use information - walk all the memory
1965 /// accesses and verify that, for each use, it appears in the appropriate
1966 /// def's use list.
1967 void MemorySSA::verifyOrderingDominationAndDefUses(Function &F) const {
1968 #if !defined(NDEBUG)
1969 // Walk all the blocks, comparing what the lookups think and what the access
1970 // lists think, as well as the order in the blocks vs the order in the access
1971 // lists.
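// Illustrative example (hypothetical block): for a block containing a load
// followed by a store, the access list should read
//   [MemoryPhi (if any), MemoryUse (load), MemoryDef (store)]
// and the defs list [MemoryPhi (if any), MemoryDef (store)]; the checks
// below flag any mismatch in membership or relative order.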
1972 SmallVector<MemoryAccess *, 32> ActualAccesses;
1973 SmallVector<MemoryAccess *, 32> ActualDefs;
1974 for (BasicBlock &B : F) {
1975 const AccessList *AL = getBlockAccesses(&B);
1976 const auto *DL = getBlockDefs(&B);
1977 MemoryPhi *Phi = getMemoryAccess(&B);
1978 if (Phi) {
1979 // Verify ordering.
1980 ActualAccesses.push_back(Phi);
1981 ActualDefs.push_back(Phi);
1982 // Verify domination.
1983 for (const Use &U : Phi->uses())
1984 assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
1985 #if defined(EXPENSIVE_CHECKS)
1986 // Verify def-uses.
1987 assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1988 pred_begin(&B), pred_end(&B))) &&
1989 "Incomplete MemoryPhi Node");
1990 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1991 verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1992 assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
1993 pred_end(&B) &&
1994 "Incoming phi block not a block predecessor");
1995 }
1996 #endif
1997 }
1998
1999 for (Instruction &I : B) {
2000 MemoryUseOrDef *MA = getMemoryAccess(&I);
2001 assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
2002 "We have memory affecting instructions "
2003 "in this block but they are not in the "
2004 "access list or defs list");
2005 if (MA) {
2006 // Verify ordering.
2007 ActualAccesses.push_back(MA);
2008 if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
2009 // Verify ordering.
2010 ActualDefs.push_back(MA);
2011 // Verify domination.
2012 for (const Use &U : MD->uses())
2013 assert(dominates(MD, U) &&
2014 "Memory Def does not dominate its uses");
2015 }
2016 #if defined(EXPENSIVE_CHECKS)
2017 // Verify def-uses.
2018 verifyUseInDefs(MA->getDefiningAccess(), MA);
2019 #endif
2020 }
2021 }
2022 // Either we hit the assert, really have no accesses, or we have both
2023 // accesses and an access list. Same with defs.
2024 if (!AL && !DL)
2025 continue;
2026 // Verify ordering.
2027 assert(AL->size() == ActualAccesses.size() &&
2028 "We don't have the same number of accesses in the block as on the "
2029 "access list");
2030 assert((DL || ActualDefs.size() == 0) &&
2031 "Either we should have a defs list, or we should have no defs");
2032 assert((!DL || DL->size() == ActualDefs.size()) &&
2033 "We don't have the same number of defs in the block as on the "
2034 "def list");
2035 auto ALI = AL->begin();
2036 auto AAI = ActualAccesses.begin();
2037 while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2038 assert(&*ALI == *AAI && "Not the same accesses in the same order");
2039 ++ALI;
2040 ++AAI;
2041 }
2042 ActualAccesses.clear();
2043 if (DL) {
2044 auto DLI = DL->begin();
2045 auto ADI = ActualDefs.begin();
2046 while (DLI != DL->end() && ADI != ActualDefs.end()) {
2047 assert(&*DLI == *ADI && "Not the same defs in the same order");
2048 ++DLI;
2049 ++ADI;
2050 }
2051 }
2052 ActualDefs.clear();
2053 }
2054 #endif
2055 }
2056
2057 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
2058 /// appears in the use list of \p Def.
2059 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
2060 #ifndef NDEBUG
2061 // The live on entry use may cause us to get a NULL def here.
2062 if (!Def)
2063 assert(isLiveOnEntryDef(Use) &&
2064 "Null def but use does not point to live on entry def");
2065 else
2066 assert(is_contained(Def->users(), Use) &&
2067 "Did not find use in def's use list");
2068 #endif
2069 }
2070
2071 /// Perform a local numbering on blocks so that instruction ordering can be
2072 /// determined in constant time.
2073 /// TODO: We currently just number in order. If we numbered by N, we could 2074 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least 2075 /// log2(N) sequences of mixed before and after) without needing to invalidate 2076 /// the numbering. 2077 void MemorySSA::renumberBlock(const BasicBlock *B) const { 2078 // The pre-increment ensures the numbers really start at 1. 2079 unsigned long CurrentNumber = 0; 2080 const AccessList *AL = getBlockAccesses(B); 2081 assert(AL != nullptr && "Asking to renumber an empty block"); 2082 for (const auto &I : *AL) 2083 BlockNumbering[&I] = ++CurrentNumber; 2084 BlockNumberingValid.insert(B); 2085 } 2086 2087 /// Determine, for two memory accesses in the same block, 2088 /// whether \p Dominator dominates \p Dominatee. 2089 /// \returns True if \p Dominator dominates \p Dominatee. 2090 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator, 2091 const MemoryAccess *Dominatee) const { 2092 const BasicBlock *DominatorBlock = Dominator->getBlock(); 2093 2094 assert((DominatorBlock == Dominatee->getBlock()) && 2095 "Asking for local domination when accesses are in different blocks!"); 2096 // A node dominates itself. 2097 if (Dominatee == Dominator) 2098 return true; 2099 2100 // When Dominatee is defined on function entry, it is not dominated by another 2101 // memory access. 2102 if (isLiveOnEntryDef(Dominatee)) 2103 return false; 2104 2105 // When Dominator is defined on function entry, it dominates the other memory 2106 // access. 2107 if (isLiveOnEntryDef(Dominator)) 2108 return true; 2109 2110 if (!BlockNumberingValid.count(DominatorBlock)) 2111 renumberBlock(DominatorBlock); 2112 2113 unsigned long DominatorNum = BlockNumbering.lookup(Dominator); 2114 // All numbers start with 1 2115 assert(DominatorNum != 0 && "Block was not numbered properly"); 2116 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee); 2117 assert(DominateeNum != 0 && "Block was not numbered properly"); 2118 return DominatorNum < DominateeNum; 2119 } 2120 2121 bool MemorySSA::dominates(const MemoryAccess *Dominator, 2122 const MemoryAccess *Dominatee) const { 2123 if (Dominator == Dominatee) 2124 return true; 2125 2126 if (isLiveOnEntryDef(Dominatee)) 2127 return false; 2128 2129 if (Dominator->getBlock() != Dominatee->getBlock()) 2130 return DT->dominates(Dominator->getBlock(), Dominatee->getBlock()); 2131 return locallyDominates(Dominator, Dominatee); 2132 } 2133 2134 bool MemorySSA::dominates(const MemoryAccess *Dominator, 2135 const Use &Dominatee) const { 2136 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) { 2137 BasicBlock *UseBB = MP->getIncomingBlock(Dominatee); 2138 // The def must dominate the incoming block of the phi. 2139 if (UseBB != Dominator->getBlock()) 2140 return DT->dominates(Dominator->getBlock(), UseBB); 2141 // If the UseBB and the DefBB are the same, compare locally. 2142 return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee)); 2143 } 2144 // If it's not a PHI node use, the normal dominates can already handle it. 
2145 return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser())); 2146 } 2147 2148 const static char LiveOnEntryStr[] = "liveOnEntry"; 2149 2150 void MemoryAccess::print(raw_ostream &OS) const { 2151 switch (getValueID()) { 2152 case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS); 2153 case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS); 2154 case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS); 2155 } 2156 llvm_unreachable("invalid value id"); 2157 } 2158 2159 void MemoryDef::print(raw_ostream &OS) const { 2160 MemoryAccess *UO = getDefiningAccess(); 2161 2162 auto printID = [&OS](MemoryAccess *A) { 2163 if (A && A->getID()) 2164 OS << A->getID(); 2165 else 2166 OS << LiveOnEntryStr; 2167 }; 2168 2169 OS << getID() << " = MemoryDef("; 2170 printID(UO); 2171 OS << ")"; 2172 2173 if (isOptimized()) { 2174 OS << "->"; 2175 printID(getOptimized()); 2176 2177 if (Optional<AliasResult> AR = getOptimizedAccessType()) 2178 OS << " " << *AR; 2179 } 2180 } 2181 2182 void MemoryPhi::print(raw_ostream &OS) const { 2183 bool First = true; 2184 OS << getID() << " = MemoryPhi("; 2185 for (const auto &Op : operands()) { 2186 BasicBlock *BB = getIncomingBlock(Op); 2187 MemoryAccess *MA = cast<MemoryAccess>(Op); 2188 if (!First) 2189 OS << ','; 2190 else 2191 First = false; 2192 2193 OS << '{'; 2194 if (BB->hasName()) 2195 OS << BB->getName(); 2196 else 2197 BB->printAsOperand(OS, false); 2198 OS << ','; 2199 if (unsigned ID = MA->getID()) 2200 OS << ID; 2201 else 2202 OS << LiveOnEntryStr; 2203 OS << '}'; 2204 } 2205 OS << ')'; 2206 } 2207 2208 void MemoryUse::print(raw_ostream &OS) const { 2209 MemoryAccess *UO = getDefiningAccess(); 2210 OS << "MemoryUse("; 2211 if (UO && UO->getID()) 2212 OS << UO->getID(); 2213 else 2214 OS << LiveOnEntryStr; 2215 OS << ')'; 2216 2217 if (Optional<AliasResult> AR = getOptimizedAccessType()) 2218 OS << " " << *AR; 2219 } 2220 2221 void MemoryAccess::dump() const { 2222 // Cannot completely remove virtual function even in release mode. 
2223 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2224 print(dbgs()); 2225 dbgs() << "\n"; 2226 #endif 2227 } 2228 2229 char MemorySSAPrinterLegacyPass::ID = 0; 2230 2231 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) { 2232 initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry()); 2233 } 2234 2235 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const { 2236 AU.setPreservesAll(); 2237 AU.addRequired<MemorySSAWrapperPass>(); 2238 } 2239 2240 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) { 2241 auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); 2242 MSSA.print(dbgs()); 2243 if (VerifyMemorySSA) 2244 MSSA.verifyMemorySSA(); 2245 return false; 2246 } 2247 2248 AnalysisKey MemorySSAAnalysis::Key; 2249 2250 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F, 2251 FunctionAnalysisManager &AM) { 2252 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 2253 auto &AA = AM.getResult<AAManager>(F); 2254 return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT)); 2255 } 2256 2257 bool MemorySSAAnalysis::Result::invalidate( 2258 Function &F, const PreservedAnalyses &PA, 2259 FunctionAnalysisManager::Invalidator &Inv) { 2260 auto PAC = PA.getChecker<MemorySSAAnalysis>(); 2261 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 2262 Inv.invalidate<AAManager>(F, PA) || 2263 Inv.invalidate<DominatorTreeAnalysis>(F, PA); 2264 } 2265 2266 PreservedAnalyses MemorySSAPrinterPass::run(Function &F, 2267 FunctionAnalysisManager &AM) { 2268 OS << "MemorySSA for function: " << F.getName() << "\n"; 2269 AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS); 2270 2271 return PreservedAnalyses::all(); 2272 } 2273 2274 PreservedAnalyses MemorySSAVerifierPass::run(Function &F, 2275 FunctionAnalysisManager &AM) { 2276 AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA(); 2277 2278 return PreservedAnalyses::all(); 2279 } 2280 2281 char MemorySSAWrapperPass::ID = 0; 2282 2283 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) { 2284 initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry()); 2285 } 2286 2287 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); } 2288 2289 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 2290 AU.setPreservesAll(); 2291 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 2292 AU.addRequiredTransitive<AAResultsWrapperPass>(); 2293 } 2294 2295 bool MemorySSAWrapperPass::runOnFunction(Function &F) { 2296 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2297 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); 2298 MSSA.reset(new MemorySSA(F, &AA, &DT)); 2299 return false; 2300 } 2301 2302 void MemorySSAWrapperPass::verifyAnalysis() const { 2303 if (VerifyMemorySSA) 2304 MSSA->verifyMemorySSA(); 2305 } 2306 2307 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const { 2308 MSSA->print(OS); 2309 } 2310 2311 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {} 2312 2313 /// Walk the use-def chains starting at \p StartingAccess and find 2314 /// the MemoryAccess that actually clobbers Loc. 
2315 /// 2316 /// \returns our clobbering memory access 2317 template <typename AliasAnalysisType> 2318 MemoryAccess * 2319 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase( 2320 MemoryAccess *StartingAccess, const MemoryLocation &Loc, 2321 unsigned &UpwardWalkLimit) { 2322 if (isa<MemoryPhi>(StartingAccess)) 2323 return StartingAccess; 2324 2325 auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess); 2326 if (MSSA->isLiveOnEntryDef(StartingUseOrDef)) 2327 return StartingUseOrDef; 2328 2329 Instruction *I = StartingUseOrDef->getMemoryInst(); 2330 2331 // Conservatively, fences are always clobbers, so don't perform the walk if we 2332 // hit a fence. 2333 if (!isa<CallBase>(I) && I->isFenceLike()) 2334 return StartingUseOrDef; 2335 2336 UpwardsMemoryQuery Q; 2337 Q.OriginalAccess = StartingUseOrDef; 2338 Q.StartingLoc = Loc; 2339 Q.Inst = I; 2340 Q.IsCall = false; 2341 2342 // Unlike the other function, do not walk to the def of a def, because we are 2343 // handed something we already believe is the clobbering access. 2344 // We never set SkipSelf to true in Q in this method. 2345 MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef) 2346 ? StartingUseOrDef->getDefiningAccess() 2347 : StartingUseOrDef; 2348 2349 MemoryAccess *Clobber = 2350 Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit); 2351 LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is "); 2352 LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n"); 2353 LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is "); 2354 LLVM_DEBUG(dbgs() << *Clobber << "\n"); 2355 return Clobber; 2356 } 2357 2358 template <typename AliasAnalysisType> 2359 MemoryAccess * 2360 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase( 2361 MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) { 2362 auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA); 2363 // If this is a MemoryPhi, we can't do anything. 2364 if (!StartingAccess) 2365 return MA; 2366 2367 bool IsOptimized = false; 2368 2369 // If this is an already optimized use or def, return the optimized result. 2370 // Note: Currently, we store the optimized def result in a separate field, 2371 // since we can't use the defining access. 2372 if (StartingAccess->isOptimized()) { 2373 if (!SkipSelf || !isa<MemoryDef>(StartingAccess)) 2374 return StartingAccess->getOptimized(); 2375 IsOptimized = true; 2376 } 2377 2378 const Instruction *I = StartingAccess->getMemoryInst(); 2379 // We can't sanely do anything with a fence, since they conservatively clobber 2380 // all memory, and have no locations to get pointers from to try to 2381 // disambiguate. 2382 if (!isa<CallBase>(I) && I->isFenceLike()) 2383 return StartingAccess; 2384 2385 UpwardsMemoryQuery Q(I, StartingAccess); 2386 2387 if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) { 2388 MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef(); 2389 StartingAccess->setOptimized(LiveOnEntry); 2390 StartingAccess->setOptimizedAccessType(None); 2391 return LiveOnEntry; 2392 } 2393 2394 MemoryAccess *OptimizedAccess; 2395 if (!IsOptimized) { 2396 // Start with the thing we already think clobbers this location 2397 MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess(); 2398 2399 // At this point, DefiningAccess may be the live on entry def. 2400 // If it is, we will not get a better result. 
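// (Editorial note: whatever result is computed below is cached on the access
// via setOptimized(), so later queries return it without another walk until
// resetOptimized() is called, e.g. when the access is moved.)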
2401 if (MSSA->isLiveOnEntryDef(DefiningAccess)) { 2402 StartingAccess->setOptimized(DefiningAccess); 2403 StartingAccess->setOptimizedAccessType(None); 2404 return DefiningAccess; 2405 } 2406 2407 OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit); 2408 StartingAccess->setOptimized(OptimizedAccess); 2409 if (MSSA->isLiveOnEntryDef(OptimizedAccess)) 2410 StartingAccess->setOptimizedAccessType(None); 2411 else if (Q.AR == MustAlias) 2412 StartingAccess->setOptimizedAccessType(MustAlias); 2413 } else 2414 OptimizedAccess = StartingAccess->getOptimized(); 2415 2416 LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is "); 2417 LLVM_DEBUG(dbgs() << *StartingAccess << "\n"); 2418 LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is "); 2419 LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n"); 2420 2421 MemoryAccess *Result; 2422 if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) && 2423 isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) { 2424 assert(isa<MemoryDef>(Q.OriginalAccess)); 2425 Q.SkipSelfAccess = true; 2426 Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit); 2427 } else 2428 Result = OptimizedAccess; 2429 2430 LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf); 2431 LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n"); 2432 2433 return Result; 2434 } 2435 2436 MemoryAccess * 2437 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) { 2438 if (auto *Use = dyn_cast<MemoryUseOrDef>(MA)) 2439 return Use->getDefiningAccess(); 2440 return MA; 2441 } 2442 2443 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess( 2444 MemoryAccess *StartingAccess, const MemoryLocation &) { 2445 if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess)) 2446 return Use->getDefiningAccess(); 2447 return StartingAccess; 2448 } 2449 2450 void MemoryPhi::deleteMe(DerivedUser *Self) { 2451 delete static_cast<MemoryPhi *>(Self); 2452 } 2453 2454 void MemoryDef::deleteMe(DerivedUser *Self) { 2455 delete static_cast<MemoryDef *>(Self); 2456 } 2457 2458 void MemoryUse::deleteMe(DerivedUser *Self) { 2459 delete static_cast<MemoryUse *>(Self); 2460 } 2461
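// Editorial appendix: a minimal, illustrative sketch of how a new-pass-manager
// function pass might consume this analysis. The pass name MyPass is
// hypothetical; the calls (MemorySSAAnalysis, getMSSA, getMemoryAccess,
// getWalker, getClobberingMemoryAccess) are the ones defined or used above.
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//     MemorySSAWalker *Walker = MSSA.getWalker();
//     for (BasicBlock &BB : F)
//       for (Instruction &I : BB)
//         if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I))
//           errs() << "Clobber for " << I << ": "
//                  << *Walker->getClobberingMemoryAccess(MA) << "\n";
//     return PreservedAnalyses::all();
//   }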