//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Support/Debug.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form"
// The simple, non-marker algorithm places phi nodes at any join
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (IE we recursively visit
// ourselves again), or we discover, while getting the value of the operands,
// that there are two or more definitions needing to be merged.
// This still will leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves, but unnecessary.
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end())
    return Cached->second;

  // If this method is called from an unreachable block, return LoE.
  if (!MSSA->DT->isReachableFromEntry(BB))
    return MSSA->getLiveOnEntryDef();

  if (BasicBlock *Pred = BB->getUniquePredecessor()) {
    VisitedBlocks.insert(BB);
    // Single predecessor case, just recurse, we can only have one definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle, we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
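    // Track whether all reachable predecessors report the same incoming
    // access; if they do, the phi below can be avoided entirely and the
    // single access used directly.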
    bool UniqueIncomingAccess = true;
    MemoryAccess *SingleAccess = nullptr;
    for (auto *Pred : predecessors(BB)) {
      if (MSSA->DT->isReachableFromEntry(Pred)) {
        auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
        if (!SingleAccess)
          SingleAccess = IncomingAccess;
        else if (IncomingAccess != SingleAccess)
          UniqueIncomingAccess = false;
        PhiOps.push_back(IncomingAccess);
      } else
        PhiOps.push_back(MSSA->getLiveOnEntryDef());
    }

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet, that's okay
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi
    if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
      // A concrete Phi only exists if we created an empty one to break a
      // cycle.
      if (Phi) {
        assert(Phi->operands().empty() && "Expected empty Phi");
        Phi->replaceAllUsesWith(SingleAccess);
        removeMemoryAccess(Phi);
      }
      Result = SingleAccess;
    } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a single
// definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, have to walk the all access iterator.
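      // A MemoryUse does not appear in the defs list, so walk all accesses
      // backwards from MA and return the first def encountered, if any.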
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}

// This starts at the end of the block
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs) {
    CachedPreviousDef.insert({BB, &*Defs->rbegin()});
    return &*Defs->rbegin();
  }

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}
// Recurse over a set of phi uses to eliminate the trivial ones
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses)
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
      tryRemoveTrivialPhi(UsePhi);
  return Res;
}

// Eliminate trivial phis
// Phis are trivial if they are defined either by themselves, or all the same
// argument.
// IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
// We recursively try to remove them.
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
  assert(Phi && "Can only remove concrete Phi.");
  auto OperRange = Phi->operands();
  return tryRemoveTrivialPhi(Phi, OperRange);
}
template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far
    if (Op == Phi || Op == Same)
      continue;
    // not the same, return the phi since it's not eliminatable by us
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // Never found a non-self reference, the phi is undef
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}

void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
  VisitedBlocks.clear();
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));

  // In cases without unreachable blocks, because uses do not create new
  // may-defs, there are only two cases:
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming
  // work to do.

  // In cases with unreachable blocks, where the unnecessary Phis were
  // optimized out, adding the Use may re-insert those Phis. Hence, when
  // inserting Uses outside of the MSSA creation process, and new Phis were
  // added, rename all uses if we are asked.

  if (!RenameUses && !InsertedPHIs.empty()) {
    auto *Defs = MSSA->getBlockDefs(MU->getBlock());
    (void)Defs;
    assert((!Defs || (++Defs->begin() == Defs->end())) &&
           "Block may have only a Phi or no defs");
  }

  if (RenameUses && InsertedPHIs.size()) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MU->getBlock();

    if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
      MemoryAccess *FirstDef = &*Defs->begin();
      // Convert to incoming value if it's a memorydef. A phi *is* already an
      // incoming value.
      if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
        FirstDef = MD->getDefiningAccess();

      MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
    }
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs)
      if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  }
}

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace any operand whose incoming block is BB with the new defining
  // access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and the
  // other not. So use it to index into the block iterator.
  for (const BasicBlock *BlockBB : llvm::drop_begin(MP->blocks(), i)) {
    if (BlockBB != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  // Don't bother updating dead code.
  if (!MSSA->DT->isReachableFromEntry(MD->getBlock())) {
    MD->setDefiningAccess(MSSA->getLiveOnEntryDef());
    return;
  }

  VisitedBlocks.clear();
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = false;
  if (DefBefore->getBlock() == MD->getBlock() &&
      !(isa<MemoryPhi>(DefBefore) &&
        llvm::is_contained(InsertedPHIs, DefBefore)))
    DefBeforeSameBlock = true;

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis
  if (DefBeforeSameBlock) {
    DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      User *Usr = U.getUser();
      return !isa<MemoryUse>(Usr) && Usr != MD;
      // Defs are automatically unoptimized when the user is set to MD below,
      // because the isOptimized() call will fail to find the same ID.
    });
  }

  // and that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());

  SmallSet<WeakVH, 8> ExistingPhis;

  // Remember the index where we may insert new phis.
  unsigned NewPhiIndex = InsertedPHIs.size();
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc we would create, it
    // would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths until
    // we hit the first def along each path). This may also insert phi nodes.
    // TODO: There are other cases where we can skip this work, such as when
    // we have a single successor, and only used a straight line of single
    // pred blocks backwards to find the def. To make that work, we'd have to
    // track whether getDefRecursive only ever used the single predecessor
    // case. These types of paths also only exist in between CFG
    // simplifications.

    // If this is the first def in the block and this insert is in an arbitrary
    // place, compute IDF and place phis.
    SmallPtrSet<BasicBlock *, 2> DefiningBlocks;

    // If this is the last Def in the block, we may need additional Phis.
    // Compute IDF in all cases, as renaming needs to be done even when MD is
    // not the last access, because it can introduce a new access past which a
    // previous access was optimized; that access needs to be reoptimized.
    DefiningBlocks.insert(MD->getBlock());
    for (const auto &VH : InsertedPHIs)
      if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
        DefiningBlocks.insert(RealPHI->getBlock());
    ForwardIDFCalculator IDFs(*MSSA->DT);
    SmallVector<BasicBlock *, 32> IDFBlocks;
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
    for (auto *BBIDF : IDFBlocks) {
      auto *MPhi = MSSA->getMemoryAccess(BBIDF);
      if (!MPhi) {
        MPhi = MSSA->createMemoryPhi(BBIDF);
        NewInsertedPHIs.push_back(MPhi);
      } else {
        ExistingPhis.insert(MPhi);
      }
      // Add the phis created into the IDF blocks to NonOptPhis, so they are
      // not optimized out as trivial by the call to getPreviousDefFromEnd
      // below. Once they are complete, all these Phis are added to the
      // FixupList, and removed from NonOptPhis inside fixupDefs(). Existing
      // Phis in IDF may need fixing as well, and potentially be trivial
      // before this insertion, hence add all IDF Phis. See PR43044.
      NonOptPhis.insert(MPhi);
    }
    for (auto &MPhi : NewInsertedPHIs) {
      auto *BBIDF = MPhi->getBlock();
      for (auto *Pred : predecessors(BBIDF)) {
        DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
        MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef),
                          Pred);
      }
    }

    // Re-take the index where we're adding the new phis, because the above
    // call to getPreviousDefFromEnd may have inserted into InsertedPHIs.
    NewPhiIndex = InsertedPHIs.size();
    for (auto &MPhi : NewInsertedPHIs) {
      InsertedPHIs.push_back(&*MPhi);
      FixupList.push_back(&*MPhi);
    }

    FixupList.push_back(MD);
  }

  // Remember the index where we stopped inserting new phis above, since the
  // fixupDefs call in the loop below may insert more, that are already
  // minimal.
  unsigned NewPhiIndexEnd = InsertedPHIs.size();

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them
    FixupList.append(InsertedPHIs.begin() + StartingPHISize,
                     InsertedPHIs.end());
  }

  // Optimize potentially non-minimal phis added in this method.
  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
  if (NewPhiSize)
    tryRemoveTrivialPhis(
        ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));

  // Now that all fixups are done, rename all uses if we are asked. The defs
  // are guaranteed to be in reachable code due to the check at the method
  // entry.
  BasicBlock *StartBlock = MD->getBlock();
  if (RenameUses) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
    // Existing Phi blocks may need renaming too, if an access was previously
    // optimized and the inserted Def "covers" the optimized value.
    for (const auto &MP : ExistingPhis) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}

void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (const auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed; remove it from NonOptPhis so it can
    // be optimized again.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(&*DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place it on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.pop_back_val();

      // Get the first def in the block that isn't a phi node.
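      // Blocks only reach the worklist if they had no MemoryPhi when visited,
      // so the first def found here, if any, is expected to be a MemoryDef
      // whose defining access must be recomputed.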
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loop above and below should have taken care of phi nodes
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually
        // dominate it
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending where the
        // store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we already
          // processed. FIXME: Double check this
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}

void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    tryRemoveTrivialPhi(MPhi);
  }
}

void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                                      const BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    tryRemoveTrivialPhi(MPhi);
  }
}

/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}

static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
                                                  const ValueToValueMapTy &VMap,
                                                  PhiToDefMap &MPhiMap,
                                                  bool CloneWasSimplified,
                                                  MemorySSA *MSSA) {
  MemoryAccess *InsnDefining = MA;
  if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
    if (!MSSA->isLiveOnEntryDef(DefMUD)) {
      Instruction *DefMUDI = DefMUD->getMemoryInst();
      assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
      if (Instruction *NewDefMUDI =
              cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
        InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
        if (!CloneWasSimplified)
          assert(InsnDefining && "Defining instruction cannot be nullptr.");
        else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
          // The clone was simplified, it's no longer a MemoryDef, look up.
          auto DefIt = DefMUD->getDefsIterator();
          // Since simplified clones only occur in single block cloning, a
          // previous definition must exist, otherwise NewDefMUDI would not
          // have been found in VMap.
          assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
                 "Previous def must exist");
          InsnDefining = getNewDefiningAccessForClone(
              &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
        }
      }
    }
  } else {
    MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
    if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
      InsnDefining = NewDefPhi;
  }
  assert(InsnDefining && "Defining instruction cannot be nullptr.");
  return InsnDefining;
}

void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap,
                                        bool CloneWasSimplified) {
  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      // Also in LoopRotate, even when it's an instruction, due to it being
      // simplified, it may be a Use rather than a Def, so we cannot use MUD as
      // template. Calls coming from updateForClonedBlockIntoPred ensure this.
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn,
            getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
                                         MPhiMap, CloneWasSimplified, MSSA),
            /*Template=*/CloneWasSimplified ? nullptr : MUD,
            /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
        if (NewUseOrDef)
          MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}

void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
    BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
  auto *MPhi = MSSA->getMemoryAccess(Header);
  if (!MPhi)
    return;

  // Create phi node in the backedge block and populate it with the same
  // incoming values as MPhi. Skip incoming values coming from Preheader.
  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
  bool HasUniqueIncomingValue = true;
  MemoryAccess *UniqueValue = nullptr;
  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *IBB = MPhi->getIncomingBlock(I);
    MemoryAccess *IV = MPhi->getIncomingValue(I);
    if (IBB != Preheader) {
      NewMPhi->addIncoming(IV, IBB);
      if (HasUniqueIncomingValue) {
        if (!UniqueValue)
          UniqueValue = IV;
        else if (UniqueValue != IV)
          HasUniqueIncomingValue = false;
      }
    }
  }

  // Update incoming edges into MPhi. Remove all but the incoming edge from
  // Preheader. Add an edge from NewMPhi.
  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
  MPhi->setIncomingValue(0, AccFromPreheader);
  MPhi->setIncomingBlock(0, Preheader);
  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
    MPhi->unorderedDeleteIncoming(I);
  MPhi->addIncoming(NewMPhi, BEBlock);

  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
  // replaced with the unique value.
  tryRemoveTrivialPhi(NewMPhi);
}

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
    if (auto *SingleAccess = onlySingleValue(NewPhi)) {
      MPhiMap[Phi] = SingleAccess;
      removeMemoryAccess(NewPhi);
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto *BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto *BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB and therefore also dominate
  // P1. Defs from BB being used in BB will be replaced with the cloned defs
  // from VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
  // incoming def into the Phi from P1.
  // Instructions cloned into the predecessor are in practice sometimes
  // simplified, so disable the use of the template, and create an access from
  // scratch.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT, bool UpdateDT) {
  SmallVector<CFGUpdate, 4> DeleteUpdates;
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (const auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else {
      DeleteUpdates.push_back({DT.Delete, Update.getFrom(), Update.getTo()});
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    }
  }

  if (!DeleteUpdates.empty()) {
    if (!InsertUpdates.empty()) {
      if (!UpdateDT) {
        SmallVector<CFGUpdate, 0> Empty;
        // Deletes are applied in reverse, because this CFGView is pretending
        // the deletes did not happen yet, hence the edges still exist.
        DT.applyUpdates(Empty, RevDeleteUpdates);
      } else {
        // Apply all updates, with the RevDeleteUpdates as PostCFGView.
        DT.applyUpdates(Updates, RevDeleteUpdates);
      }

      // Note: the MSSA update below doesn't distinguish between a GD with
      // (RevDelete, false) and (Delete, true), but this matters for the DT
      // updates above; for "children" purposes they are equivalent; but the
      // updates themselves convey the desired update, used inside DT only.
      GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
      applyInsertUpdates(InsertUpdates, DT, &GD);
      // Update DT to redelete edges; this matches the real CFG so we can
      // perform the standard update without a postview of the CFG.
      DT.applyUpdates(DeleteUpdates);
    } else {
      if (UpdateDT)
        DT.applyUpdates(DeleteUpdates);
    }
  } else {
    if (UpdateDT)
      DT.applyUpdates(Updates);
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges
  for (auto &Update : DeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
        Pred = Pi;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
        // DT is invalidated. Return LoE as its last def. This will be added to
        // MemoryPhi node, and later deleted when the block is deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        // BB can be unreachable though, return LoE if that is the case.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        BB = Pred;
      }
    };
    llvm_unreachable("Unable to get last definition.");
  };

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
  // include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed).
  // Since we further iterate over these ordered sets, we lose the information
  // of multiple edges possibly existing between two blocks, so we'll keep an
  // EdgeCount map for that.
  // An alternate implementation could keep unordered sets for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (const auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB, at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not invalidate
      // iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
  SmallVector<WeakVH, 8> InsertedPhis;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (const auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop.
    // We can reuse already populated entries if an edge is added from the
    // same predecessor to two different blocks, and this does happen in
    // rotate. Note that the map needs to be updated when deleting
    // non-necessary phis below, if the phi is in the map, by replacing the
    // value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefPredPair. If all the same,
      // nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all uses
        // of NewPhi with the definition coming from all predecessors (DefP1),
        // before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for all
      // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
      // sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exist");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  tryRemoveTrivialPhis(InsertedPhis);
  // Create the set of blocks that now have a definition. We'll use this to
  // compute IDF and add Phis there next.
  SmallVector<BasicBlock *, 8> BlocksToProcess;
  for (auto &VH : InsertedPhis)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      BlocksToProcess.push_back(MPhi->getBlock());

  // Compute IDF and add Phis in all IDF blocks that do not have one.
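  // The IDF calculation below uses the dominator tree together with the
  // GraphDiff (GD), so the predecessor/successor relations of the snapshot
  // CFG, including the just-inserted edges, are respected when placing Phis.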
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT, GD);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);

    SmallSetVector<MemoryPhi *, 4> PhisToFill;
    // First create all needed Phis.
    for (auto *BBIDF : IDFBlocks)
      if (!MSSA->getMemoryAccess(BBIDF)) {
        auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
        InsertedPhis.push_back(IDFPhi);
        PhisToFill.insert(IDFPhi);
      }
    // Then update or insert their correct incoming values.
    for (auto *BBIDF : IDFBlocks) {
      auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
      assert(IDFPhi && "Phi must exist");
      if (!PhisToFill.count(IDFPhi)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BBIDF))
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        for (Use &U : llvm::make_early_inc_range(DefToReplaceUses.uses())) {
          MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
  tryRemoveTrivialPhis(InsertedPhis);
}

// Move What before Where in the MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD, /*RenameUses=*/true);
  else
    insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  if (Where != MemorySSA::InsertionPlace::BeforeTerminator)
    return moveTo(What, BB, Where);

  if (auto *Where = MSSA->getMemoryAccess(BB->getTerminator()))
    return moveBefore(What, Where);
  else
    return moveTo(What, BB, MemorySSA::InsertionPlace::End);
}

// All accesses in To used to be in From. Move to end and update access lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {

  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  assert(Start->getParent() == To && "Incorrect Start instruction");
  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (FirstInNew) {
    auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
    do {
      auto NextIt = ++MUD->getIterator();
      MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                    ? nullptr
                                    : cast<MemoryUseOrDef>(&*NextIt);
      MSSA->moveTo(MUD, To, MemorySSA::End);
      // Moving MUD from Accs in the moveTo above may delete Accs, so we need
      // to retrieve it again.
      Accs = MSSA->getWritableBlockAccesses(From);
      MUD = NextMUD;
    } while (MUD);
  }

  // If all accesses were moved and only a trivial Phi remains, we try to
  // remove that Phi. This is needed when From is going to be deleted.
  auto *Defs = MSSA->getWritableBlockDefs(From);
  if (Defs && !Defs->empty())
    if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
      tryRemoveTrivialPhi(Phi);
}

void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getUniquePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // Currently only support the case of removing a single incoming edge when
    // identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    tryRemoveTrivialPhi(NewPhi);
  }
}

void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we prefer users to removeMemoryAccess those
    // phi nodes, because doing it here would be N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    assert(NewDefTarget != MA && "Going into an infinite loop");
    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
        tryRemoveTrivialPhi(MP);
  }
}

void MemorySSAUpdater::removeBlocks(
    const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          tryRemoveTrivialPhi(MP);
        }
    // Drop all references of all accesses in BB
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (MemoryAccess &MA : llvm::make_early_inc_range(*Acc)) {
      MSSA->removeFromLookups(&MA);
      MSSA->removeFromLists(&MA);
    }
  }
}

void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
  for (const auto &VH : UpdatedPHIs)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      tryRemoveTrivialPhi(MPhi);
}

void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  // Remove memory accesses in BB for I and all following instructions.
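  // The instruction-based removeMemoryAccess overload is expected to be a
  // no-op for instructions without an associated access, so it is safe to
  // call on every remaining instruction.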
  auto BBI = I->getIterator(), BBE = BB->end();
  // FIXME: If this becomes too expensive, iterate until the first instruction
  // with a memory access, then iterate over MemoryAccesses.
  while (BBI != BBE)
    removeMemoryAccess(&*(BBI++));
  // Update phis in BB's successors to remove BB.
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Successor : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Successor);
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
      MPhi->unorderedDeleteIncomingBlock(BB);
      UpdatedPHIs.push_back(MPhi);
    }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}