//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form".
// The simple, non-marker algorithm places phi nodes at any join.
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (IE we recursively visit
// ourselves again), or we discover, while getting the value of the operands,
// that there are two or more definitions needing to be merged.
// This still will leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves, but unnecessary.
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end())
    return Cached->second;

  // If this method is called from an unreachable block, return LoE.
  if (!MSSA->DT->isReachableFromEntry(BB))
    return MSSA->getLiveOnEntryDef();

  if (BasicBlock *Pred = BB->getUniquePredecessor()) {
    VisitedBlocks.insert(BB);
    // Single predecessor case, just recurse, we can only have one definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle, we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle.
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
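    // For example, when every reachable predecessor yields the same access
    // (e.g. a diamond whose branches contain no may-defs), no new MemoryPhi
    // is needed and that single access becomes the result.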
    bool UniqueIncomingAccess = true;
    MemoryAccess *SingleAccess = nullptr;
    for (auto *Pred : predecessors(BB)) {
      if (MSSA->DT->isReachableFromEntry(Pred)) {
        auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
        if (!SingleAccess)
          SingleAccess = IncomingAccess;
        else if (IncomingAccess != SingleAccess)
          UniqueIncomingAccess = false;
        PhiOps.push_back(IncomingAccess);
      } else
        PhiOps.push_back(MSSA->getLiveOnEntryDef());
    }

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet, that's okay.
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi.
    if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
      // A concrete Phi only exists if we created an empty one to break a
      // cycle.
      if (Phi) {
        assert(Phi->operands().empty() && "Expected empty Phi");
        Phi->replaceAllUsesWith(SingleAccess);
        removeMemoryAccess(Phi);
      }
      Result = SingleAccess;
    } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a
// single definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of
// the access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, have to walk the all access iterator.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}

// This starts at the end of the block.
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs) {
    CachedPreviousDef.insert({BB, &*Defs->rbegin()});
    return &*Defs->rbegin();
  }

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}

// Recurse over a set of phi uses to eliminate the trivial ones.
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses)
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
      tryRemoveTrivialPhi(UsePhi);
  return Res;
}

// Eliminate trivial phis.
// Phis are trivial if they are defined either by themselves, or all the same
// argument.
// IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
// We recursively try to remove them.
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
  assert(Phi && "Can only remove concrete Phi.");
  auto OperRange = Phi->operands();
  return tryRemoveTrivialPhi(Phi, OperRange);
}

template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments.
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far.
    if (Op == Phi || Op == Same)
      continue;
    // Not the same, return the phi since it's not eliminatable by us.
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // Never found a non-self reference, the phi is undef.
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}

void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));

  // In cases without unreachable blocks, because uses do not create new
  // may-defs, there are only two cases:
  // 1. There was a def already below us, and therefore, we should not have
  //    created a phi node because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming
  //    work to do.

  // In cases with unreachable blocks, where the unnecessary Phis were
  // optimized out, adding the Use may re-insert those Phis. Hence, when
  // inserting Uses outside of the MSSA creation process, and new Phis were
  // added, rename all uses if we are asked.
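  // (If renaming is not requested but Phis were inserted, the use's block may
  // contain at most a Phi as its only def; the assert below checks this.)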

  if (!RenameUses && !InsertedPHIs.empty()) {
    auto *Defs = MSSA->getBlockDefs(MU->getBlock());
    (void)Defs;
    assert((!Defs || (++Defs->begin() == Defs->end())) &&
           "Block may have only a Phi or no defs");
  }

  if (RenameUses && InsertedPHIs.size()) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MU->getBlock();

    if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
      MemoryAccess *FirstDef = &*Defs->begin();
      // Convert to incoming value if it's a memorydef. A phi *is* already an
      // incoming value.
      if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
        FirstDef = MD->getDefiningAccess();

      MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
    }
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs)
      if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  }
}

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace any operand whose incoming block is BB with the new defining
  // access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and
  // the other is not. So use it to index into the block iterator.
  for (const BasicBlock *BlockBB : llvm::drop_begin(MP->blocks(), i)) {
    if (BlockBB != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = false;
  if (DefBefore->getBlock() == MD->getBlock() &&
      !(isa<MemoryPhi>(DefBefore) &&
        llvm::is_contained(InsertedPHIs, DefBefore)))
    DefBeforeSameBlock = true;

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis.
  if (DefBeforeSameBlock) {
    DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      User *Usr = U.getUser();
      return !isa<MemoryUse>(Usr) && Usr != MD;
      // Defs are automatically unoptimized when the user is set to MD below,
      // because the isOptimized() call will fail to find the same ID.
    });
  }

  // And that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());

  SmallSet<WeakVH, 8> ExistingPhis;

  // Remember the index where we may insert new phis.
  unsigned NewPhiIndex = InsertedPHIs.size();
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc we would create, it
    // would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths until
    // we hit the first def along each path). This may also insert phi nodes.
    // TODO: There are other cases where we can skip this work, such as when we
    // have a single successor, and only used a straight line of single pred
    // blocks backwards to find the def. To make that work, we'd have to track
    // whether getDefRecursive only ever used the single predecessor case.
    // These types of paths also only exist in between CFG simplifications.

    // If this is the first def in the block and this insert is in an
    // arbitrary place, compute IDF and place phis.
    SmallPtrSet<BasicBlock *, 2> DefiningBlocks;

    // If this is the last Def in the block, we may need additional Phis.
    // Compute IDF in all cases, as renaming needs to be done even when MD is
    // not the last access, because it can introduce a new access past which a
    // previous access was optimized; that access needs to be reoptimized.
    DefiningBlocks.insert(MD->getBlock());
    for (const auto &VH : InsertedPHIs)
      if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
        DefiningBlocks.insert(RealPHI->getBlock());
    ForwardIDFCalculator IDFs(*MSSA->DT);
    SmallVector<BasicBlock *, 32> IDFBlocks;
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
    for (auto *BBIDF : IDFBlocks) {
      auto *MPhi = MSSA->getMemoryAccess(BBIDF);
      if (!MPhi) {
        MPhi = MSSA->createMemoryPhi(BBIDF);
        NewInsertedPHIs.push_back(MPhi);
      } else {
        ExistingPhis.insert(MPhi);
      }
      // Add the phis created into the IDF blocks to NonOptPhis, so they are
      // not optimized out as trivial by the call to getPreviousDefFromEnd
      // below. Once they are complete, all these Phis are added to the
      // FixupList, and removed from NonOptPhis inside fixupDefs(). Existing
      // Phis in IDF may need fixing as well, and potentially be trivial
      // before this insertion, hence add all IDF Phis. See PR43044.
      NonOptPhis.insert(MPhi);
    }
    for (auto &MPhi : NewInsertedPHIs) {
      auto *BBIDF = MPhi->getBlock();
      for (auto *Pred : predecessors(BBIDF)) {
        DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
        MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef), Pred);
      }
    }

    // Re-take the index where we're adding the new phis, because the above
    // call to getPreviousDefFromEnd may have inserted into InsertedPHIs.
    NewPhiIndex = InsertedPHIs.size();
    for (auto &MPhi : NewInsertedPHIs) {
      InsertedPHIs.push_back(&*MPhi);
      FixupList.push_back(&*MPhi);
    }

    FixupList.push_back(MD);
  }

  // Remember the index where we stopped inserting new phis above, since the
  // fixupDefs call in the loop below may insert more that are already
  // minimal.
  unsigned NewPhiIndexEnd = InsertedPHIs.size();

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them.
    FixupList.append(InsertedPHIs.begin() + StartingPHISize,
                     InsertedPHIs.end());
  }

  // Optimize potentially non-minimal phis added in this method.
  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
  if (NewPhiSize)
    tryRemoveTrivialPhis(
        ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));

  // Now that all fixups are done, rename all uses if we are asked. Skip
  // renaming for defs in unreachable blocks.
  BasicBlock *StartBlock = MD->getBlock();
  if (RenameUses && MSSA->getDomTree().getNode(StartBlock)) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
    // Existing Phi blocks may need renaming too, if an access was previously
    // optimized and the inserted Defs "cover" the Optimized value.
    for (auto &MP : ExistingPhis) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}

void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed; remove it from the set of Phis not to
    // optimize.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place it on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.pop_back_val();

      // Get the first def in the block that isn't a phi node.
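      // (Blocks that have a MemoryPhi were handled directly above and never
      // reach the worklist, so the first def found here is a MemoryDef.)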
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loop above and below should have taken care of phi nodes.
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually
        // dominate it.
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending on where
        // the store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist.
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we
          // already processed. FIXME: Double check this.
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}

void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    tryRemoveTrivialPhi(MPhi);
  }
}

void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                                      const BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    tryRemoveTrivialPhi(MPhi);
  }
}

/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}

static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
                                                  const ValueToValueMapTy &VMap,
                                                  PhiToDefMap &MPhiMap,
                                                  bool CloneWasSimplified,
                                                  MemorySSA *MSSA) {
  MemoryAccess *InsnDefining = MA;
  if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
    if (!MSSA->isLiveOnEntryDef(DefMUD)) {
      Instruction *DefMUDI = DefMUD->getMemoryInst();
      assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
      if (Instruction *NewDefMUDI =
              cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
        InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
        if (!CloneWasSimplified)
          assert(InsnDefining && "Defining instruction cannot be nullptr.");
        else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
          // The clone was simplified, it's no longer a MemoryDef, look up.
          auto DefIt = DefMUD->getDefsIterator();
          // Since simplified clones only occur in single block cloning, a
          // previous definition must exist, otherwise NewDefMUDI would not
          // have been found in VMap.
          assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
                 "Previous def must exist");
          InsnDefining = getNewDefiningAccessForClone(
              &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
        }
      }
    }
  } else {
    MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
    if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
      InsnDefining = NewDefPhi;
  }
  assert(InsnDefining && "Defining instruction cannot be nullptr.");
  return InsnDefining;
}

void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap,
                                        bool CloneWasSimplified) {
  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      // Also in LoopRotate, even when it's an instruction, due to it being
      // simplified, it may be a Use rather than a Def, so we cannot use MUD
      // as a template. Calls coming from updateForClonedBlockIntoPred ensure
      // this.
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn,
            getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
                                         MPhiMap, CloneWasSimplified, MSSA),
            /*Template=*/CloneWasSimplified ? nullptr : MUD,
            /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
        if (NewUseOrDef)
          MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}

void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
    BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
  auto *MPhi = MSSA->getMemoryAccess(Header);
  if (!MPhi)
    return;

  // Create phi node in the backedge block and populate it with the same
  // incoming values as MPhi. Skip incoming values coming from Preheader.
  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
  bool HasUniqueIncomingValue = true;
  MemoryAccess *UniqueValue = nullptr;
  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *IBB = MPhi->getIncomingBlock(I);
    MemoryAccess *IV = MPhi->getIncomingValue(I);
    if (IBB != Preheader) {
      NewMPhi->addIncoming(IV, IBB);
      if (HasUniqueIncomingValue) {
        if (!UniqueValue)
          UniqueValue = IV;
        else if (UniqueValue != IV)
          HasUniqueIncomingValue = false;
      }
    }
  }

  // Update incoming edges into MPhi. Remove all but the incoming edge from
  // Preheader. Add an edge from NewMPhi.
  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
  MPhi->setIncomingValue(0, AccFromPreheader);
  MPhi->setIncomingBlock(0, Preheader);
  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
    MPhi->unorderedDeleteIncoming(I);
  MPhi->addIncoming(NewMPhi, BEBlock);

  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will
  // be replaced with the unique value.
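  // (NewMPhi is trivial exactly when all of its incoming values are the same,
  // i.e. the HasUniqueIncomingValue case tracked above.)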
  tryRemoveTrivialPhi(NewMPhi);
}

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
    if (auto *SingleAccess = onlySingleValue(NewPhi)) {
      MPhiMap[Phi] = SingleAccess;
      removeMemoryAccess(NewPhi);
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB, and also dominate P1.
  // Defs from BB being used in BB will be replaced with the cloned defs from
  // VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
  // incoming def into the Phi from P1.
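  // For example, a load in BB whose defining access is BB's MemoryPhi will,
  // in its clone in P1, be defined by whatever access the Phi receives along
  // the edge from P1; MPhiMap below records exactly that mapping.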
  // Instructions cloned into the predecessor are in practice sometimes
  // simplified, so disable the use of the template, and create an access
  // from scratch.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT, bool UpdateDT) {
  SmallVector<CFGUpdate, 4> DeleteUpdates;
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else {
      DeleteUpdates.push_back({DT.Delete, Update.getFrom(), Update.getTo()});
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    }
  }

  if (!DeleteUpdates.empty()) {
    if (!InsertUpdates.empty()) {
      if (!UpdateDT) {
        SmallVector<CFGUpdate, 0> Empty;
        // Deletes are applied reversed (as insertions), because this CFGView
        // is pretending the deletes did not happen yet, hence the edges still
        // exist.
        DT.applyUpdates(Empty, RevDeleteUpdates);
      } else {
        // Apply all updates, with the RevDeleteUpdates as PostCFGView.
        DT.applyUpdates(Updates, RevDeleteUpdates);
      }

      // Note: the MSSA update below doesn't distinguish between a GD with
      // (RevDelete, false) and (Delete, true), but this matters for the DT
      // updates above; for "children" purposes they are equivalent; but the
      // updates themselves convey the desired update, used inside DT only.
      GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
      applyInsertUpdates(InsertUpdates, DT, &GD);
      // Update DT to redelete edges; this matches the real CFG so we can
      // perform the standard update without a postview of the CFG.
      DT.applyUpdates(DeleteUpdates);
    } else {
      if (UpdateDT)
        DT.applyUpdates(DeleteUpdates);
    }
  } else {
    if (UpdateDT)
      DT.applyUpdates(Updates);
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges.
  for (auto &Update : DeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
        Pred = Pi;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
        // DT is invalidated. Return LoE as its last def. This will be added to
        // MemoryPhi node, and later deleted when the block is deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        // BB can be unreachable though, return LoE if that is the case.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        BB = Pred;
      }
    };
    llvm_unreachable("Unable to get last definition.");
  };

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
  // include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed).
  // Since we further iterate over these ordered sets, we lose the information
  // that multiple edges may exist between two blocks, so we keep an EdgeCount
  // map for that.
  // An alternate implementation could keep unordered sets for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB, at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not invalidate
      // the iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
  SmallVector<WeakVH, 8> InsertedPhis;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop.
    // We can reuse already populated entries if an edge is added from the
    // same predecessor to two different blocks, and this does happen in
    // rotate. Note that the map would need to be updated when deleting
    // unnecessary phis below: if the phi is in the map, its value would have
    // to be replaced with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefPredPair. If all the same,
      // nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all uses
        // of NewPhi with the definition coming from all predecessors (DefP1),
        // before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for all
      // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
      // sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exist");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  tryRemoveTrivialPhis(InsertedPhis);
  // Create the set of blocks that now have a definition. We'll use this to
  // compute IDF and add Phis there next.
  SmallVector<BasicBlock *, 8> BlocksToProcess;
  for (auto &VH : InsertedPhis)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      BlocksToProcess.push_back(MPhi->getBlock());

  // Compute IDF and add Phis in all IDF blocks that do not have one.
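  // (The IDF is computed over the updated CFG snapshot in GD, so newly
  // inserted edges are taken into account when placing these Phis.)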
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT, GD);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);

    SmallSetVector<MemoryPhi *, 4> PhisToFill;
    // First create all needed Phis.
    for (auto *BBIDF : IDFBlocks)
      if (!MSSA->getMemoryAccess(BBIDF)) {
        auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
        InsertedPhis.push_back(IDFPhi);
        PhisToFill.insert(IDFPhi);
      }
    // Then update or insert their correct incoming values.
    for (auto *BBIDF : IDFBlocks) {
      auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
      assert(IDFPhi && "Phi must exist");
      if (!PhisToFill.count(IDFPhi)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BBIDF))
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        for (Use &U : llvm::make_early_inc_range(DefToReplaceUses.uses())) {
          MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
  tryRemoveTrivialPhis(InsertedPhis);
}

// Move What before Where in the MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD, /*RenameUses=*/true);
  else
    insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  if (Where != MemorySSA::InsertionPlace::BeforeTerminator)
    return moveTo(What, BB, Where);

  if (auto *Where = MSSA->getMemoryAccess(BB->getTerminator()))
    return moveBefore(What, Where);
  else
    return moveTo(What, BB, MemorySSA::InsertionPlace::End);
}

// All accesses in To used to be in From. Move to end and update access lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {

  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  assert(Start->getParent() == To && "Incorrect Start instruction");
  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (FirstInNew) {
    auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
    do {
      auto NextIt = ++MUD->getIterator();
      MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                    ? nullptr
                                    : cast<MemoryUseOrDef>(&*NextIt);
      MSSA->moveTo(MUD, To, MemorySSA::End);
      // Moving MUD from Accs in the moveTo above may delete Accs, so we need
      // to retrieve it again.
      Accs = MSSA->getWritableBlockAccesses(From);
      MUD = NextMUD;
    } while (MUD);
  }

  // If all accesses were moved and only a trivial Phi remains, we try to
  // remove that Phi. This is needed when From is going to be deleted.
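  // (The loop above only moves MemoryUseOrDefs; a block's MemoryPhi is never
  // moved and can only go away once it becomes trivial.)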
  auto *Defs = MSSA->getWritableBlockDefs(From);
  if (Defs && !Defs->empty())
    if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
      tryRemoveTrivialPhi(Phi);
}

void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getUniquePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // Currently only support the case of removing a single incoming edge when
    // identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    tryRemoveTrivialPhi(NewPhi);
  }
}

void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
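    // (onlySingleValue returns that common argument, or null if the phi has
    // genuinely distinct incoming values.)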
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access.
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    //    uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    //    flags on users of phi nodes if doing the below makes a phi node have
    //    all the same arguments. Instead, we prefer users to removeMemoryAccess
    //    those phi nodes, because doing it here would be N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    assert(NewDefTarget != MA && "Going into an infinite loop");
    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here.
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
        tryRemoveTrivialPhi(MP);
  }
}

void MemorySSAUpdater::removeBlocks(
    const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          tryRemoveTrivialPhi(MP);
        }
    // Drop all references of all accesses in BB.
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block.
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (MemoryAccess &MA : llvm::make_early_inc_range(*Acc)) {
      MSSA->removeFromLookups(&MA);
      MSSA->removeFromLists(&MA);
    }
  }
}

void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
  for (auto &VH : UpdatedPHIs)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      tryRemoveTrivialPhi(MPhi);
}

void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  // Remove memory accesses in BB for I and all following instructions.
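  // (The Instruction overload of removeMemoryAccess used below simply skips
  // instructions that have no associated MemoryAccess.)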
  auto BBI = I->getIterator(), BBE = BB->end();
  // FIXME: If this becomes too expensive, iterate until the first instruction
  // with a memory access, then iterate over MemoryAccesses.
  while (BBI != BBE)
    removeMemoryAccess(&*(BBI++));
  // Update phis in BB's successors to remove BB.
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Successor : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Successor);
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
      MPhi->unorderedDeleteIncomingBlock(BB);
      UpdatedPHIs.push_back(MPhi);
    }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}