xref: /freebsd/contrib/llvm-project/llvm/lib/Analysis/MemorySSAUpdater.cpp (revision ec0ea6efa1ad229d75c394c1a9b9cac33af2b1d3)
1 //===-- MemorySSAUpdater.cpp - Memory SSA Updater -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSAUpdater class.
10 //
11 //===----------------------------------------------------------------===//
12 #include "llvm/Analysis/MemorySSAUpdater.h"
13 #include "llvm/Analysis/LoopIterator.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/Analysis/IteratedDominanceFrontier.h"
18 #include "llvm/Analysis/MemorySSA.h"
19 #include "llvm/IR/BasicBlock.h"
20 #include "llvm/IR/DataLayout.h"
21 #include "llvm/IR/Dominators.h"
22 #include "llvm/IR/GlobalVariable.h"
23 #include "llvm/IR/IRBuilder.h"
24 #include "llvm/IR/LLVMContext.h"
25 #include "llvm/IR/Metadata.h"
26 #include "llvm/IR/Module.h"
27 #include "llvm/Support/Debug.h"
28 #include "llvm/Support/FormattedStream.h"
29 #include <algorithm>
30 
31 #define DEBUG_TYPE "memoryssa"
32 using namespace llvm;
33 
34 // This is the marker algorithm from "Simple and Efficient Construction of
35 // Static Single Assignment Form".
36 // The simple, non-marker algorithm places phi nodes at any join.
37 // Here, we place markers, and only place phi nodes if they end up necessary.
38 // They are only necessary if they break a cycle (i.e. we recursively visit
39 // ourselves again), or we discover, while getting the value of the operands,
40 // that there are two or more definitions needing to be merged.
41 // This will still leave non-minimal form in the case of irreducible control
42 // flow, where phi nodes may be in cycles with themselves, but unnecessary.
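// For example (hypothetical CFG), take a one-block loop whose header is its
// own predecessor: walking the predecessors revisits the header, so an empty
// MemoryPhi is created there to break the cycle; if both incoming values then
// resolve to the same def from outside the loop, tryRemoveTrivialPhi folds the
// phi away again.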
43 MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
44     BasicBlock *BB,
45     DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
46   // First, do a cache lookup. Without this cache, certain CFG structures
47   // (like a series of if statements) take exponential time to visit.
48   auto Cached = CachedPreviousDef.find(BB);
49   if (Cached != CachedPreviousDef.end())
50     return Cached->second;
51 
52   // If this method is called from an unreachable block, return LoE.
53   if (!MSSA->DT->isReachableFromEntry(BB))
54     return MSSA->getLiveOnEntryDef();
55 
56   if (BasicBlock *Pred = BB->getUniquePredecessor()) {
57     VisitedBlocks.insert(BB);
58     // Single predecessor case, just recurse, we can only have one definition.
59     MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
60     CachedPreviousDef.insert({BB, Result});
61     return Result;
62   }
63 
64   if (VisitedBlocks.count(BB)) {
65     // We hit our node again, meaning we had a cycle, we must insert a phi
66     // node to break it so we have an operand. The only case this will
67     // insert useless phis is if we have irreducible control flow.
68     MemoryAccess *Result = MSSA->createMemoryPhi(BB);
69     CachedPreviousDef.insert({BB, Result});
70     return Result;
71   }
72 
73   if (VisitedBlocks.insert(BB).second) {
74     // Mark us visited so we can detect a cycle
75     SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;
76 
77     // Recurse to get the values in our predecessors for placement of a
78     // potential phi node. This will insert phi nodes if we cycle in order to
79     // break the cycle and have an operand.
80     bool UniqueIncomingAccess = true;
81     MemoryAccess *SingleAccess = nullptr;
82     for (auto *Pred : predecessors(BB)) {
83       if (MSSA->DT->isReachableFromEntry(Pred)) {
84         auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
85         if (!SingleAccess)
86           SingleAccess = IncomingAccess;
87         else if (IncomingAccess != SingleAccess)
88           UniqueIncomingAccess = false;
89         PhiOps.push_back(IncomingAccess);
90       } else
91         PhiOps.push_back(MSSA->getLiveOnEntryDef());
92     }
93 
94     // Now try to simplify the ops to avoid placing a phi.
95 // This may return null if we never created a phi yet; that's okay.
96     MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));
97 
98     // See if we can avoid the phi by simplifying it.
99     auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
100     // If we couldn't simplify, we may have to create a phi
101     if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
102       // A concrete Phi only exists if we created an empty one to break a cycle.
103       if (Phi) {
104         assert(Phi->operands().empty() && "Expected empty Phi");
105         Phi->replaceAllUsesWith(SingleAccess);
106         removeMemoryAccess(Phi);
107       }
108       Result = SingleAccess;
109     } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
110       if (!Phi)
111         Phi = MSSA->createMemoryPhi(BB);
112 
113       // See if the existing phi operands match what we need.
114       // Unlike normal SSA, we only allow one phi node per block, so we can't just
115       // create a new one.
116       if (Phi->getNumOperands() != 0) {
117         // FIXME: Figure out whether this is dead code and if so remove it.
118         if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
119           // These will have been filled in by the recursive read we did above.
120           llvm::copy(PhiOps, Phi->op_begin());
121           std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
122         }
123       } else {
124         unsigned i = 0;
125         for (auto *Pred : predecessors(BB))
126           Phi->addIncoming(&*PhiOps[i++], Pred);
127         InsertedPHIs.push_back(Phi);
128       }
129       Result = Phi;
130     }
131 
132     // Set ourselves up for the next variable by resetting visited state.
133     VisitedBlocks.erase(BB);
134     CachedPreviousDef.insert({BB, Result});
135     return Result;
136   }
137   llvm_unreachable("Should have hit one of the three cases above");
138 }
139 
140 // This starts at the memory access, and goes backwards in the block to find the
141 // previous definition. If a definition is not found in the block of the access,
142 // it continues globally, creating phi nodes to ensure we have a single
143 // definition.
144 MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
145   if (auto *LocalResult = getPreviousDefInBlock(MA))
146     return LocalResult;
147   DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
148   return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
149 }
150 
151 // This starts at the memory access, and goes backwards in the block to find
152 // the previous definition. If the definition is not found in the block of the
153 // access, it returns nullptr.
154 MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
155   auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());
156 
157   // It's possible there are no defs, or we got handed the first def to start.
158   if (Defs) {
159     // If this is a def, we can just use the def iterators.
160     if (!isa<MemoryUse>(MA)) {
161       auto Iter = MA->getReverseDefsIterator();
162       ++Iter;
163       if (Iter != Defs->rend())
164         return &*Iter;
165     } else {
166       // Otherwise, we have to walk the all-access iterator.
167       auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
168       for (auto &U : make_range(++MA->getReverseIterator(), End))
169         if (!isa<MemoryUse>(U))
170           return cast<MemoryAccess>(&U);
171       // Note that if MA comes before Defs->begin(), we won't hit a def.
172       return nullptr;
173     }
174   }
175   return nullptr;
176 }
177 
178 // This starts at the end of the block.
179 MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
180     BasicBlock *BB,
181     DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
182   auto *Defs = MSSA->getWritableBlockDefs(BB);
183 
184   if (Defs) {
185     CachedPreviousDef.insert({BB, &*Defs->rbegin()});
186     return &*Defs->rbegin();
187   }
188 
189   return getPreviousDefRecursive(BB, CachedPreviousDef);
190 }
191 // Recurse over a set of phi uses to eliminate the trivial ones
192 MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
193   if (!Phi)
194     return nullptr;
195   TrackingVH<MemoryAccess> Res(Phi);
196   SmallVector<TrackingVH<Value>, 8> Uses;
197   std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
198   for (auto &U : Uses)
199     if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
200       tryRemoveTrivialPhi(UsePhi);
201   return Res;
202 }
203 
204 // Eliminate trivial phis
205 // Phis are trivial if they are defined either by themselves, or all the same
206 // argument.
207 // IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
208 // We recursively try to remove them.
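// For example (hypothetical values), with b = phi(a, b) and c = phi(a, b):
// removing the trivial phi b (it only references a and itself) rewrites c to
// phi(a, a), and the recursive step then removes c as well.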
209 MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
210   assert(Phi && "Can only remove concrete Phi.");
211   auto OperRange = Phi->operands();
212   return tryRemoveTrivialPhi(Phi, OperRange);
213 }
214 template <class RangeType>
215 MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
216                                                     RangeType &Operands) {
217   // Bail out on non-opt Phis.
218   if (NonOptPhis.count(Phi))
219     return Phi;
220 
221   // Detect equal or self arguments
222   MemoryAccess *Same = nullptr;
223   for (auto &Op : Operands) {
224     // If the same or self, good so far
225     if (Op == Phi || Op == Same)
226       continue;
227     // not the same, return the phi since it's not eliminatable by us
228     if (Same)
229       return Phi;
230     Same = cast<MemoryAccess>(&*Op);
231   }
232   // Never found a non-self reference, the phi is undef
233   if (Same == nullptr)
234     return MSSA->getLiveOnEntryDef();
235   if (Phi) {
236     Phi->replaceAllUsesWith(Same);
237     removeMemoryAccess(Phi);
238   }
239 
240   // We should only end up recursing in case we replaced something, in which
241   // case, we may have made other Phis trivial.
242   return recursePhi(Same);
243 }
244 
245 void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
246   InsertedPHIs.clear();
247   MU->setDefiningAccess(getPreviousDef(MU));
248 
249   // In cases without unreachable blocks, because uses do not create new
250   // may-defs, there are only two cases:
251   // 1. There was a def already below us, and therefore, we should not have
252   // created a phi node because it was already needed for the def.
253   //
254   // 2. There is no def below us, and therefore, there is no extra renaming work
255   // to do.
256 
257   // In cases with unreachable blocks, where the unnecessary Phis were
258   // optimized out, adding the Use may re-insert those Phis. Hence, when
259   // inserting Uses outside of the MSSA creation process, and new Phis were
260   // added, rename all uses if we are asked.
261 
262   if (!RenameUses && !InsertedPHIs.empty()) {
263     auto *Defs = MSSA->getBlockDefs(MU->getBlock());
264     (void)Defs;
265     assert((!Defs || (++Defs->begin() == Defs->end())) &&
266            "Block may have only a Phi or no defs");
267   }
268 
269   if (RenameUses && InsertedPHIs.size()) {
270     SmallPtrSet<BasicBlock *, 16> Visited;
271     BasicBlock *StartBlock = MU->getBlock();
272 
273     if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
274       MemoryAccess *FirstDef = &*Defs->begin();
275       // Convert to incoming value if it's a memorydef. A phi *is* already an
276       // incoming value.
277       if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
278         FirstDef = MD->getDefiningAccess();
279 
280       MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
281     }
282     // We just inserted a phi into this block, so the incoming value will
283     // become the phi anyway, so it does not matter what we pass.
284     for (auto &MP : InsertedPHIs)
285       if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
286         MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
287   }
288 }
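
// A minimal sketch of how a transform might drive insertUse above, e.g. after
// cloning a load `NewLoad` into the start of block `BB` (names hypothetical;
// MSSAU is a MemorySSAUpdater):
//
//   MemoryAccess *NewAcc = MSSAU.createMemoryAccessInBB(
//       NewLoad, /*Definition=*/nullptr, BB, MemorySSA::Beginning);
//   if (auto *MU = dyn_cast_or_null<MemoryUse>(NewAcc))
//     MSSAU.insertUse(MU, /*RenameUses=*/true);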
289 
290 // Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
291 static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
292                                       MemoryAccess *NewDef) {
293   // Replace the incoming value of every operand whose incoming block is BB
294   // with the new defining access.
295   int i = MP->getBasicBlockIndex(BB);
296   assert(i != -1 && "Should have found the basic block in the phi");
297   // We can't just compare i against getNumOperands since one is signed and the
298   // other is not. So use i to index into the block iterator.
299   for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
300        ++BBIter) {
301     if (*BBIter != BB)
302       break;
303     MP->setIncomingValue(i, NewDef);
304     ++i;
305   }
306 }
307 
308 // A brief description of the algorithm:
309 // First, we compute what should define the new def, using the SSA
310 // construction algorithm.
311 // Then, we update the defs below us (and any new phi nodes) in the graph to
312 // point to the correct new defs, to ensure we only have one variable, and no
313 // disconnected stores.
314 void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
315   InsertedPHIs.clear();
316 
317   // See if we had a local def, and if not, go hunting.
318   MemoryAccess *DefBefore = getPreviousDef(MD);
319   bool DefBeforeSameBlock = false;
320   if (DefBefore->getBlock() == MD->getBlock() &&
321       !(isa<MemoryPhi>(DefBefore) &&
322         llvm::is_contained(InsertedPHIs, DefBefore)))
323     DefBeforeSameBlock = true;
324 
325   // There is a def before us, which means we can replace any store/phi uses
326   // of that thing with us, since we are in the way of whatever was there
327   // before.
328   // We now define that def's memorydefs and memoryphis
329   if (DefBeforeSameBlock) {
330     DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
331       // Leave the MemoryUses alone.
332       // Also make sure we skip ourselves to avoid self references.
333       User *Usr = U.getUser();
334       return !isa<MemoryUse>(Usr) && Usr != MD;
335       // Defs are automatically unoptimized when the user is set to MD below,
336       // because the isOptimized() call will fail to find the same ID.
337     });
338   }
339 
340   // and that def is now our defining access.
341   MD->setDefiningAccess(DefBefore);
342 
343   SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());
344 
345   SmallSet<WeakVH, 8> ExistingPhis;
346 
347   // Remember the index where we may insert new phis.
348   unsigned NewPhiIndex = InsertedPHIs.size();
349   if (!DefBeforeSameBlock) {
350     // If there was a local def before us, we must have the same effect it
351     // did. Because every may-def is the same, any phis/etc we would create, it
352     // would also have created.  If there was no local def before us, we
353     // performed a global update, and have to search all successors and make
354     // sure we update the first def in each of them (following all paths until
355     // we hit the first def along each path). This may also insert phi nodes.
356     // TODO: There are other cases where we can skip this work, such as when we have a
357     // single successor, and only used a straight line of single pred blocks
358     // backwards to find the def.  To make that work, we'd have to track whether
359     // getDefRecursive only ever used the single predecessor case.  These types
360     // of paths also only exist in between CFG simplifications.
361 
362     // If this is the first def in the block and this insert is in an arbitrary
363     // place, compute IDF and place phis.
364     SmallPtrSet<BasicBlock *, 2> DefiningBlocks;
365 
366     // If this is the last Def in the block, we may need additional Phis.
367     // Compute IDF in all cases, as renaming needs to be done even when MD is
368     // not the last access, because it can introduce a new access past which a
369     // previous access was optimized; that access needs to be reoptimized.
370     DefiningBlocks.insert(MD->getBlock());
371     for (const auto &VH : InsertedPHIs)
372       if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
373         DefiningBlocks.insert(RealPHI->getBlock());
374     ForwardIDFCalculator IDFs(*MSSA->DT);
375     SmallVector<BasicBlock *, 32> IDFBlocks;
376     IDFs.setDefiningBlocks(DefiningBlocks);
377     IDFs.calculate(IDFBlocks);
378     SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
379     for (auto *BBIDF : IDFBlocks) {
380       auto *MPhi = MSSA->getMemoryAccess(BBIDF);
381       if (!MPhi) {
382         MPhi = MSSA->createMemoryPhi(BBIDF);
383         NewInsertedPHIs.push_back(MPhi);
384       } else {
385         ExistingPhis.insert(MPhi);
386       }
387       // Add the phis created into the IDF blocks to NonOptPhis, so they are not
388       // optimized out as trivial by the call to getPreviousDefFromEnd below.
389       // Once they are complete, all these Phis are added to the FixupList, and
390       // removed from NonOptPhis inside fixupDefs(). Existing Phis in IDF may
391       // need fixing as well, and potentially be trivial before this insertion,
392       // hence add all IDF Phis. See PR43044.
393       NonOptPhis.insert(MPhi);
394     }
395     for (auto &MPhi : NewInsertedPHIs) {
396       auto *BBIDF = MPhi->getBlock();
397       for (auto *Pred : predecessors(BBIDF)) {
398         DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
399         MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef), Pred);
400       }
401     }
402 
403     // Re-take the index where we're adding the new phis, because the above call
404     // to getPreviousDefFromEnd may have inserted into InsertedPHIs.
405     NewPhiIndex = InsertedPHIs.size();
406     for (auto &MPhi : NewInsertedPHIs) {
407       InsertedPHIs.push_back(&*MPhi);
408       FixupList.push_back(&*MPhi);
409     }
410 
411     FixupList.push_back(MD);
412   }
413 
414   // Remember the index where we stopped inserting new phis above, since the
415   // fixupDefs call in the loop below may insert more that are already minimal.
416   unsigned NewPhiIndexEnd = InsertedPHIs.size();
417 
418   while (!FixupList.empty()) {
419     unsigned StartingPHISize = InsertedPHIs.size();
420     fixupDefs(FixupList);
421     FixupList.clear();
422     // Put any new phis on the fixup list, and process them
423     FixupList.append(InsertedPHIs.begin() + StartingPHISize, InsertedPHIs.end());
424   }
425 
426   // Optimize potentially non-minimal phis added in this method.
427   unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
428   if (NewPhiSize)
429     tryRemoveTrivialPhis(ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));
430 
431   // Now that all fixups are done, rename all uses if we are asked. Skip
432   // renaming for defs in unreachable blocks.
433   BasicBlock *StartBlock = MD->getBlock();
434   if (RenameUses && MSSA->getDomTree().getNode(StartBlock)) {
435     SmallPtrSet<BasicBlock *, 16> Visited;
436     // We are guaranteed there is a def in the block, because we just got it
437     // handed to us in this function.
438     MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
439     // Convert to incoming value if it's a memorydef. A phi *is* already an
440     // incoming value.
441     if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
442       FirstDef = MD->getDefiningAccess();
443 
444     MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
445     // We just inserted a phi into this block, so the incoming value will become
446     // the phi anyway, so it does not matter what we pass.
447     for (auto &MP : InsertedPHIs) {
448       MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
449       if (Phi)
450         MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
451     }
452     // Existing Phi blocks may need renaming too, if an access was previously
453     // optimized and the inserted Defs "covers" the Optimized value.
454     for (auto &MP : ExistingPhis) {
455       MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
456       if (Phi)
457         MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
458     }
459   }
460 }
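
// A minimal sketch of how a transform might drive insertDef above, e.g. after
// cloning a store `NewStore` into block `BB` (names hypothetical; MSSAU is a
// MemorySSAUpdater):
//
//   MemoryAccess *NewAcc = MSSAU.createMemoryAccessInBB(
//       NewStore, /*Definition=*/nullptr, BB, MemorySSA::BeforeTerminator);
//   if (auto *NewMD = dyn_cast_or_null<MemoryDef>(NewAcc))
//     MSSAU.insertDef(NewMD, /*RenameUses=*/true);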
461 
462 void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
463   SmallPtrSet<const BasicBlock *, 8> Seen;
464   SmallVector<const BasicBlock *, 16> Worklist;
465   for (auto &Var : Vars) {
466     MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
467     if (!NewDef)
468       continue;
469     // First, see if there is a local def after the operand.
470     auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
471     auto DefIter = NewDef->getDefsIterator();
472 
473     // The temporary Phi is being fixed up; unmark it so it can be optimized.
474     if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
475       NonOptPhis.erase(Phi);
476 
477     // If there is a local def after us, we only have to rename that.
478     if (++DefIter != Defs->end()) {
479       cast<MemoryDef>(&*DefIter)->setDefiningAccess(NewDef);
480       continue;
481     }
482 
483     // Otherwise, we need to search down through the CFG.
484     // For each of our successors, handle it directly if there is a phi, or
485     // place on the fixup worklist.
486     for (const auto *S : successors(NewDef->getBlock())) {
487       if (auto *MP = MSSA->getMemoryAccess(S))
488         setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
489       else
490         Worklist.push_back(S);
491     }
492 
493     while (!Worklist.empty()) {
494       const BasicBlock *FixupBlock = Worklist.back();
495       Worklist.pop_back();
496 
497       // Get the first def in the block that isn't a phi node.
498       if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
499         auto *FirstDef = &*Defs->begin();
500         // The loop above and below should have taken care of phi nodes
501         assert(!isa<MemoryPhi>(FirstDef) &&
502                "Should have already handled phi nodes!");
503         // We are now this def's defining access, make sure we actually dominate
504         // it
505         assert(MSSA->dominates(NewDef, FirstDef) &&
506                "Should have dominated the new access");
507 
508         // This may insert new phi nodes, because we are not guaranteed the
509         // block we are processing has a single pred, and depending where the
510         // store was inserted, it may require phi nodes below it.
511         cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
512         return;
513       }
514       // We didn't find a def, so we must continue.
515       for (const auto *S : successors(FixupBlock)) {
516         // If there is a phi node, handle it.
517         // Otherwise, put the block on the worklist
518         if (auto *MP = MSSA->getMemoryAccess(S))
519           setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
520         else {
521           // If we cycle, we should have ended up at a phi node that we already
522           // processed.  FIXME: Double check this
523           if (!Seen.insert(S).second)
524             continue;
525           Worklist.push_back(S);
526         }
527       }
528     }
529   }
530 }
531 
532 void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
533   if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
534     MPhi->unorderedDeleteIncomingBlock(From);
535     tryRemoveTrivialPhi(MPhi);
536   }
537 }
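
// A minimal caller sketch (hypothetical names): after a transform rewrites the
// terminator of FromBB so it no longer branches to ToBB, it can keep MemorySSA
// in sync with:
//
//   ToBB->removePredecessor(FromBB);
//   MSSAU.removeEdge(FromBB, ToBB);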
538 
539 void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
540                                                       const BasicBlock *To) {
541   if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
542     bool Found = false;
543     MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
544       if (From != B)
545         return false;
546       if (Found)
547         return true;
548       Found = true;
549       return false;
550     });
551     tryRemoveTrivialPhi(MPhi);
552   }
553 }
554 
555 /// If all arguments of a MemoryPHI are defined by the same incoming
556 /// argument, return that argument.
557 static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
558   MemoryAccess *MA = nullptr;
559 
560   for (auto &Arg : MP->operands()) {
561     if (!MA)
562       MA = cast<MemoryAccess>(Arg);
563     else if (MA != Arg)
564       return nullptr;
565   }
566   return MA;
567 }
568 
569 static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
570                                                   const ValueToValueMapTy &VMap,
571                                                   PhiToDefMap &MPhiMap,
572                                                   bool CloneWasSimplified,
573                                                   MemorySSA *MSSA) {
574   MemoryAccess *InsnDefining = MA;
575   if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
576     if (!MSSA->isLiveOnEntryDef(DefMUD)) {
577       Instruction *DefMUDI = DefMUD->getMemoryInst();
578       assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
579       if (Instruction *NewDefMUDI =
580               cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
581         InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
582         if (!CloneWasSimplified)
583           assert(InsnDefining && "Defining instruction cannot be nullptr.");
584         else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
585           // The clone was simplified, it's no longer a MemoryDef, look up.
586           auto DefIt = DefMUD->getDefsIterator();
587           // Since simplified clones only occur in single block cloning, a
588           // previous definition must exist, otherwise NewDefMUDI would not
589           // have been found in VMap.
590           assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
591                  "Previous def must exist");
592           InsnDefining = getNewDefiningAccessForClone(
593               &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
594         }
595       }
596     }
597   } else {
598     MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
599     if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
600       InsnDefining = NewDefPhi;
601   }
602   assert(InsnDefining && "Defining instruction cannot be nullptr.");
603   return InsnDefining;
604 }
605 
606 void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
607                                         const ValueToValueMapTy &VMap,
608                                         PhiToDefMap &MPhiMap,
609                                         bool CloneWasSimplified) {
610   const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
611   if (!Acc)
612     return;
613   for (const MemoryAccess &MA : *Acc) {
614     if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
615       Instruction *Insn = MUD->getMemoryInst();
616       // Entry does not exist if the clone of the block did not clone all
617       // instructions. This occurs in LoopRotate when cloning instructions
618       // from the old header to the old preheader. The cloned instruction may
619       // also be a simplified Value, not an Instruction (see LoopRotate).
620       // Also in LoopRotate, even when it's an instruction, due to it being
621       // simplified, it may be a Use rather than a Def, so we cannot use MUD as
622       // template. Calls coming from updateForClonedBlockIntoPred ensure this.
623       if (Instruction *NewInsn =
624               dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
625         MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
626             NewInsn,
627             getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
628                                          MPhiMap, CloneWasSimplified, MSSA),
629             /*Template=*/CloneWasSimplified ? nullptr : MUD,
630             /*CreationMustSucceed=*/!CloneWasSimplified);
631         if (NewUseOrDef)
632           MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
633       }
634     }
635   }
636 }
637 
638 void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
639     BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
640   auto *MPhi = MSSA->getMemoryAccess(Header);
641   if (!MPhi)
642     return;
643 
644   // Create phi node in the backedge block and populate it with the same
645   // incoming values as MPhi. Skip incoming values coming from Preheader.
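  // For example (hypothetical incoming values), a header phi
  //   MPhi = MemoryPhi([Preheader, P], [Latch1, L1], [Latch2, L2])
  // becomes, after this function runs,
  //   MPhi    = MemoryPhi([Preheader, P], [BEBlock, NewMPhi])
  //   NewMPhi = MemoryPhi([Latch1, L1], [Latch2, L2])
  // and NewMPhi is removed below as trivial if L1 == L2.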
646   auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
647   bool HasUniqueIncomingValue = true;
648   MemoryAccess *UniqueValue = nullptr;
649   for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
650     BasicBlock *IBB = MPhi->getIncomingBlock(I);
651     MemoryAccess *IV = MPhi->getIncomingValue(I);
652     if (IBB != Preheader) {
653       NewMPhi->addIncoming(IV, IBB);
654       if (HasUniqueIncomingValue) {
655         if (!UniqueValue)
656           UniqueValue = IV;
657         else if (UniqueValue != IV)
658           HasUniqueIncomingValue = false;
659       }
660     }
661   }
662 
663   // Update incoming edges into MPhi. Remove all but the incoming edge from
664   // Preheader. Add an edge from NewMPhi.
665   auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
666   MPhi->setIncomingValue(0, AccFromPreheader);
667   MPhi->setIncomingBlock(0, Preheader);
668   for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
669     MPhi->unorderedDeleteIncoming(I);
670   MPhi->addIncoming(NewMPhi, BEBlock);
671 
672   // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
673   // replaced with the unique value.
674   tryRemoveTrivialPhi(NewMPhi);
675 }
676 
677 void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
678                                            ArrayRef<BasicBlock *> ExitBlocks,
679                                            const ValueToValueMapTy &VMap,
680                                            bool IgnoreIncomingWithNoClones) {
681   PhiToDefMap MPhiMap;
682 
683   auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
684     assert(Phi && NewPhi && "Invalid Phi nodes.");
685     BasicBlock *NewPhiBB = NewPhi->getBlock();
686     SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
687                                                pred_end(NewPhiBB));
688     for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
689       MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
690       BasicBlock *IncBB = Phi->getIncomingBlock(It);
691 
692       if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
693         IncBB = NewIncBB;
694       else if (IgnoreIncomingWithNoClones)
695         continue;
696 
697       // Now we have IncBB, and will need to add incoming from it to NewPhi.
698 
699       // If IncBB is not a predecessor of NewPhiBB, then do not add it.
700       // NewPhiBB was cloned without that edge.
701       if (!NewPhiBBPreds.count(IncBB))
702         continue;
703 
704       // Determine incoming value and add it as incoming from IncBB.
705       if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
706         if (!MSSA->isLiveOnEntryDef(IncMUD)) {
707           Instruction *IncI = IncMUD->getMemoryInst();
708           assert(IncI && "Found MemoryUseOrDef with no Instruction.");
709           if (Instruction *NewIncI =
710                   cast_or_null<Instruction>(VMap.lookup(IncI))) {
711             IncMUD = MSSA->getMemoryAccess(NewIncI);
712             assert(IncMUD &&
713                    "MemoryUseOrDef cannot be null, all preds processed.");
714           }
715         }
716         NewPhi->addIncoming(IncMUD, IncBB);
717       } else {
718         MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
719         if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
720           NewPhi->addIncoming(NewDefPhi, IncBB);
721         else
722           NewPhi->addIncoming(IncPhi, IncBB);
723       }
724     }
725     if (auto *SingleAccess = onlySingleValue(NewPhi)) {
726       MPhiMap[Phi] = SingleAccess;
727       removeMemoryAccess(NewPhi);
728     }
729   };
730 
731   auto ProcessBlock = [&](BasicBlock *BB) {
732     BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
733     if (!NewBlock)
734       return;
735 
736     assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
737            "Cloned block should have no accesses");
738 
739     // Add MemoryPhi.
740     if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
741       MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
742       MPhiMap[MPhi] = NewPhi;
743     }
744     // Update Uses and Defs.
745     cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
746   };
747 
748   for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
749     ProcessBlock(BB);
750 
751   for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
752     if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
753       if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
754         FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
755 }
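
// A minimal sketch of how a cloning transform might drive updateForClonedLoop
// above, assuming it has cloned loop L block-by-block and recorded the
// old-to-new mapping in VMap (names hypothetical):
//
//   LoopBlocksRPO LoopBlocks(L);
//   LoopBlocks.perform(&LI);
//   MSSAU.updateForClonedLoop(LoopBlocks, ExitBlocks, VMap,
//                             /*IgnoreIncomingWithNoClones=*/true);
//   MSSAU.updateExitBlocksForClonedLoop(ExitBlocks, VMap, DT);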
756 
757 void MemorySSAUpdater::updateForClonedBlockIntoPred(
758     BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
759   // All defs/phis from outside BB that are used in BB are valid uses in P1,
760   // since those defs/phis must have dominated BB and therefore dominate P1.
761   // Defs from BB being used in BB will be replaced with the cloned defs from
762   // VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
763   // incoming def into the Phi from P1.
764   // Instructions cloned into the predecessor are in practice sometimes
765   // simplified, so disable the use of the template, and create an access from
766   // scratch.
767   PhiToDefMap MPhiMap;
768   if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
769     MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
770   cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
771 }
772 
773 template <typename Iter>
774 void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
775     ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
776     DominatorTree &DT) {
777   SmallVector<CFGUpdate, 4> Updates;
778   // Update/insert phis in all successors of exit blocks.
779   for (auto *Exit : ExitBlocks)
780     for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
781       if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
782         BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
783         Updates.push_back({DT.Insert, NewExit, ExitSucc});
784       }
785   applyInsertUpdates(Updates, DT);
786 }
787 
788 void MemorySSAUpdater::updateExitBlocksForClonedLoop(
789     ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
790     DominatorTree &DT) {
791   const ValueToValueMapTy *const Arr[] = {&VMap};
792   privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
793                                        std::end(Arr), DT);
794 }
795 
796 void MemorySSAUpdater::updateExitBlocksForClonedLoop(
797     ArrayRef<BasicBlock *> ExitBlocks,
798     ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
799   auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
800     return I.get();
801   };
802   using MappedIteratorType =
803       mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
804                       decltype(GetPtr)>;
805   auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
806   auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
807   privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
808 }
809 
810 void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
811                                     DominatorTree &DT, bool UpdateDT) {
812   SmallVector<CFGUpdate, 4> DeleteUpdates;
813   SmallVector<CFGUpdate, 4> RevDeleteUpdates;
814   SmallVector<CFGUpdate, 4> InsertUpdates;
815   for (auto &Update : Updates) {
816     if (Update.getKind() == DT.Insert)
817       InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
818     else {
819       DeleteUpdates.push_back({DT.Delete, Update.getFrom(), Update.getTo()});
820       RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
821     }
822   }
823 
824   if (!DeleteUpdates.empty()) {
825     if (!UpdateDT) {
826       SmallVector<CFGUpdate, 0> Empty;
827       // Deletes are applied in reverse (as insertions), because this CFGView
828       // pretends the deletes did not happen yet, hence the edges still exist.
829       DT.applyUpdates(Empty, RevDeleteUpdates);
830     } else {
831       // Apply all updates, with the RevDeleteUpdates as PostCFGView.
832       DT.applyUpdates(Updates, RevDeleteUpdates);
833     }
834 
835     // Note: the MSSA update below doesn't distinguish between a GD with
836     // (RevDelete,false) and (Delete, true), but this matters for the DT
837     // updates above; for "children" purposes they are equivalent; but the
838     // updates themselves convey the desired update, used inside DT only.
839     GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
840     applyInsertUpdates(InsertUpdates, DT, &GD);
841     // Update DT to redelete edges; this matches the real CFG so we can perform
842     // the standard update without a postview of the CFG.
843     DT.applyUpdates(DeleteUpdates);
844   } else {
845     if (UpdateDT)
846       DT.applyUpdates(Updates);
847     GraphDiff<BasicBlock *> GD;
848     applyInsertUpdates(InsertUpdates, DT, &GD);
849   }
850 
851   // Update for deleted edges
852   for (auto &Update : DeleteUpdates)
853     removeEdge(Update.getFrom(), Update.getTo());
854 }
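
// A minimal sketch of how a transform might batch CFG changes through
// applyUpdates above, e.g. after redirecting an edge A -> B to A -> C
// (names hypothetical; DominatorTree::UpdateType is the same cfg::Update type
// as CFGUpdate):
//
//   SmallVector<DominatorTree::UpdateType, 2> Updates;
//   Updates.push_back({DominatorTree::Delete, A, B});
//   Updates.push_back({DominatorTree::Insert, A, C});
//   MSSAU.applyUpdates(Updates, DT, /*UpdateDT=*/true);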
855 
856 void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
857                                           DominatorTree &DT) {
858   GraphDiff<BasicBlock *> GD;
859   applyInsertUpdates(Updates, DT, &GD);
860 }
861 
862 void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
863                                           DominatorTree &DT,
864                                           const GraphDiff<BasicBlock *> *GD) {
865   // Get recursive last Def, assuming well formed MSSA and updated DT.
866   auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
867     while (true) {
868       MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
869       // Return last Def or Phi in BB, if it exists.
870       if (Defs)
871         return &*(--Defs->end());
872 
873       // Check the number of predecessors; we only care if there's more than one.
874       unsigned Count = 0;
875       BasicBlock *Pred = nullptr;
876       for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
877         Pred = Pi;
878         Count++;
879         if (Count == 2)
880           break;
881       }
882 
883       // If BB has multiple predecessors, get last definition from IDom.
884       if (Count != 1) {
885         // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its DT
886         // node is invalidated. Return LoE as its last def. This will be added to
887         // the MemoryPhi node, and later deleted when the block is deleted.
888         if (!DT.getNode(BB))
889           return MSSA->getLiveOnEntryDef();
890         if (auto *IDom = DT.getNode(BB)->getIDom())
891           if (IDom->getBlock() != BB) {
892             BB = IDom->getBlock();
893             continue;
894           }
895         return MSSA->getLiveOnEntryDef();
896       } else {
897         // Single predecessor, BB cannot be dead. GetLastDef of Pred.
898         assert(Count == 1 && Pred && "Single predecessor expected.");
899         // BB can be unreachable though, return LoE if that is the case.
900         if (!DT.getNode(BB))
901           return MSSA->getLiveOnEntryDef();
902         BB = Pred;
903       }
904     }
905     llvm_unreachable("Unable to get last definition.");
906   };
907 
908   // Get nearest IDom given a set of blocks.
909   // TODO: this can be optimized by starting the search at the node with the
910   // lowest level (highest in the tree).
911   auto FindNearestCommonDominator =
912       [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
913     BasicBlock *PrevIDom = *BBSet.begin();
914     for (auto *BB : BBSet)
915       PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
916     return PrevIDom;
917   };
918 
919   // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
920   // include CurrIDom.
921   auto GetNoLongerDomBlocks =
922       [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
923           SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
924         if (PrevIDom == CurrIDom)
925           return;
926         BlocksPrevDom.push_back(PrevIDom);
927         BasicBlock *NextIDom = PrevIDom;
928         while (BasicBlock *UpIDom =
929                    DT.getNode(NextIDom)->getIDom()->getBlock()) {
930           if (UpIDom == CurrIDom)
931             break;
932           BlocksPrevDom.push_back(UpIDom);
933           NextIDom = UpIDom;
934         }
935       };
936 
937   // Map a BB to its predecessors: added + previously existing. To get a
938   // deterministic order, store predecessors as SetVectors. The order in each
939   // will be defined by the order in Updates (fixed) and the order given by
940   // children<> (also fixed). Since we further iterate over these ordered sets,
941   // we lose the information of multiple edges possibly existing between two
942   // blocks, so we'll keep an EdgeCount map for that.
943   // An alternate implementation could keep an unordered set for the predecessors,
944   // traverse either Updates or children<> each time to get the deterministic
945   // order, and drop the usage of EdgeCount. This alternate approach would still
946   // require querying the maps for each predecessor, and the children<> call has
947   // additional computation inside for creating the snapshot-graph predecessors.
948   // As such, we favor using a little additional storage and less compute time.
949   // This decision can be revisited if we find the alternative more favorable.
950 
951   struct PredInfo {
952     SmallSetVector<BasicBlock *, 2> Added;
953     SmallSetVector<BasicBlock *, 2> Prev;
954   };
955   SmallDenseMap<BasicBlock *, PredInfo> PredMap;
956 
957   for (auto &Edge : Updates) {
958     BasicBlock *BB = Edge.getTo();
959     auto &AddedBlockSet = PredMap[BB].Added;
960     AddedBlockSet.insert(Edge.getFrom());
961   }
962 
963   // Store all existing predecessors for each BB; at least one must exist.
964   SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
965   SmallPtrSet<BasicBlock *, 2> NewBlocks;
966   for (auto &BBPredPair : PredMap) {
967     auto *BB = BBPredPair.first;
968     const auto &AddedBlockSet = BBPredPair.second.Added;
969     auto &PrevBlockSet = BBPredPair.second.Prev;
970     for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
971       if (!AddedBlockSet.count(Pi))
972         PrevBlockSet.insert(Pi);
973       EdgeCountMap[{Pi, BB}]++;
974     }
975 
976     if (PrevBlockSet.empty()) {
977       assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
978       LLVM_DEBUG(
979           dbgs()
980           << "Adding a predecessor to a block with no predecessors. "
981              "This must be an edge added to a new, likely cloned, block. "
982              "Its memory accesses must be already correct, assuming completed "
983              "via the updateExitBlocksForClonedLoop API. "
984              "Assert a single such edge is added so no phi addition or "
985              "additional processing is required.\n");
986       assert(AddedBlockSet.size() == 1 &&
987              "Can only handle adding one predecessor to a new block.");
988       // Need to remove new blocks from PredMap. Remove below to not invalidate
989       // iterator here.
990       NewBlocks.insert(BB);
991     }
992   }
993   // Nothing to process for new/cloned blocks.
994   for (auto *BB : NewBlocks)
995     PredMap.erase(BB);
996 
997   SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
998   SmallVector<WeakVH, 8> InsertedPhis;
999 
1000   // First create MemoryPhis in all blocks that don't have one. Create in the
1001   // order found in Updates, not in PredMap, to get deterministic numbering.
1002   for (auto &Edge : Updates) {
1003     BasicBlock *BB = Edge.getTo();
1004     if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
1005       InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
1006   }
1007 
1008   // Now we'll fill in the MemoryPhis with the right incoming values.
1009   for (auto &BBPredPair : PredMap) {
1010     auto *BB = BBPredPair.first;
1011     const auto &PrevBlockSet = BBPredPair.second.Prev;
1012     const auto &AddedBlockSet = BBPredPair.second.Added;
1013     assert(!PrevBlockSet.empty() &&
1014            "At least one previous predecessor must exist.");
1015 
1016     // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
1017     // keeping this map before the loop. We can reuse already populated entries
1018     // if an edge is added from the same predecessor to two different blocks,
1019     // and this does happen in rotate. Note that the map needs to be updated
1020     // when deleting non-necessary phis below, if the phi is in the map by
1021     // replacing the value with DefP1.
1022     SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
1023     for (auto *AddedPred : AddedBlockSet) {
1024       auto *DefPn = GetLastDef(AddedPred);
1025       assert(DefPn != nullptr && "Unable to find last definition.");
1026       LastDefAddedPred[AddedPred] = DefPn;
1027     }
1028 
1029     MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
1030     // If Phi is not empty, add an incoming edge from each added pred. Must
1031     // still compute blocks with defs to replace for this block below.
1032     if (NewPhi->getNumOperands()) {
1033       for (auto *Pred : AddedBlockSet) {
1034         auto *LastDefForPred = LastDefAddedPred[Pred];
1035         for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1036           NewPhi->addIncoming(LastDefForPred, Pred);
1037       }
1038     } else {
1039       // Pick any existing predecessor and get its definition. All other
1040       // existing predecessors should have the same one, since no phi existed.
1041       auto *P1 = *PrevBlockSet.begin();
1042       MemoryAccess *DefP1 = GetLastDef(P1);
1043 
1044       // Check DefP1 against all Defs in LastDefPredPair. If all the same,
1045       // nothing to add.
1046       bool InsertPhi = false;
1047       for (auto LastDefPredPair : LastDefAddedPred)
1048         if (DefP1 != LastDefPredPair.second) {
1049           InsertPhi = true;
1050           break;
1051         }
1052       if (!InsertPhi) {
1053         // Since NewPhi may be used in other newly added Phis, replace all uses
1054         // of NewPhi with the definition coming from all predecessors (DefP1),
1055         // before deleting it.
1056         NewPhi->replaceAllUsesWith(DefP1);
1057         removeMemoryAccess(NewPhi);
1058         continue;
1059       }
1060 
1061       // Update Phi with new values for new predecessors and old value for all
1062       // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
1063       // sets, the order of entries in NewPhi is deterministic.
1064       for (auto *Pred : AddedBlockSet) {
1065         auto *LastDefForPred = LastDefAddedPred[Pred];
1066         for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1067           NewPhi->addIncoming(LastDefForPred, Pred);
1068       }
1069       for (auto *Pred : PrevBlockSet)
1070         for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
1071           NewPhi->addIncoming(DefP1, Pred);
1072     }
1073 
1074     // Get all blocks that used to dominate BB and no longer do after adding
1075     // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
1076     assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
1077     BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
1078     assert(PrevIDom && "Previous IDom should exist");
1079     BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
1080     assert(NewIDom && "BB should have a new valid idom");
1081     assert(DT.dominates(NewIDom, PrevIDom) &&
1082            "New idom should dominate old idom");
1083     GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
1084   }
1085 
1086   tryRemoveTrivialPhis(InsertedPhis);
1087   // Create the set of blocks that now have a definition. We'll use this to
1088   // compute IDF and add Phis there next.
1089   SmallVector<BasicBlock *, 8> BlocksToProcess;
1090   for (auto &VH : InsertedPhis)
1091     if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
1092       BlocksToProcess.push_back(MPhi->getBlock());
1093 
1094   // Compute IDF and add Phis in all IDF blocks that do not have one.
1095   SmallVector<BasicBlock *, 32> IDFBlocks;
1096   if (!BlocksToProcess.empty()) {
1097     ForwardIDFCalculator IDFs(DT, GD);
1098     SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
1099                                                  BlocksToProcess.end());
1100     IDFs.setDefiningBlocks(DefiningBlocks);
1101     IDFs.calculate(IDFBlocks);
1102 
1103     SmallSetVector<MemoryPhi *, 4> PhisToFill;
1104     // First create all needed Phis.
1105     for (auto *BBIDF : IDFBlocks)
1106       if (!MSSA->getMemoryAccess(BBIDF)) {
1107         auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
1108         InsertedPhis.push_back(IDFPhi);
1109         PhisToFill.insert(IDFPhi);
1110       }
1111     // Then update or insert their correct incoming values.
1112     for (auto *BBIDF : IDFBlocks) {
1113       auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
1114       assert(IDFPhi && "Phi must exist");
1115       if (!PhisToFill.count(IDFPhi)) {
1116         // Update existing Phi.
1117         // FIXME: some updates may be redundant, try to optimize and skip some.
1118         for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
1119           IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
1120       } else {
1121         for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BBIDF))
1122           IDFPhi->addIncoming(GetLastDef(Pi), Pi);
1123       }
1124     }
1125   }
1126 
1127   // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
1128   // longer dominate, replace those with the closest dominating def.
1129   // This will also update optimized accesses, as they're also uses.
1130   for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
1131     if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
1132       for (auto &DefToReplaceUses : *DefsList) {
1133         BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
1134         Value::use_iterator UI = DefToReplaceUses.use_begin(),
1135                             E = DefToReplaceUses.use_end();
1136         for (; UI != E;) {
1137           Use &U = *UI;
1138           ++UI;
1139           MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
1140           if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
1141             BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
1142             if (!DT.dominates(DominatingBlock, DominatedBlock))
1143               U.set(GetLastDef(DominatedBlock));
1144           } else {
1145             BasicBlock *DominatedBlock = Usr->getBlock();
1146             if (!DT.dominates(DominatingBlock, DominatedBlock)) {
1147               if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
1148                 U.set(DomBlPhi);
1149               else {
1150                 auto *IDom = DT.getNode(DominatedBlock)->getIDom();
1151                 assert(IDom && "Block must have a valid IDom.");
1152                 U.set(GetLastDef(IDom->getBlock()));
1153               }
1154               cast<MemoryUseOrDef>(Usr)->resetOptimized();
1155             }
1156           }
1157         }
1158       }
1159     }
1160   }
1161   tryRemoveTrivialPhis(InsertedPhis);
1162 }
1163 
1164 // Move What before Where in the MemorySSA IR.
1165 template <class WhereType>
1166 void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1167                               WhereType Where) {
1168   // Mark MemoryPhi users of What not to be optimized.
1169   for (auto *U : What->users())
1170     if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
1171       NonOptPhis.insert(PhiUser);
1172 
1173   // Replace all our users with our defining access.
1174   What->replaceAllUsesWith(What->getDefiningAccess());
1175 
1176   // Let MemorySSA take care of moving it around in the lists.
1177   MSSA->moveTo(What, BB, Where);
1178 
1179   // Now reinsert it into the IR and do whatever fixups needed.
1180   if (auto *MD = dyn_cast<MemoryDef>(What))
1181     insertDef(MD, /*RenameUses=*/true);
1182   else
1183     insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);
1184 
1185   // Clear dangling pointers. We added all MemoryPhi users, but not all
1186   // of them are removed by fixupDefs().
1187   NonOptPhis.clear();
1188 }
1189 
1190 // Move What before Where in the MemorySSA IR.
1191 void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
1192   moveTo(What, Where->getBlock(), Where->getIterator());
1193 }
1194 
1195 // Move What after Where in the MemorySSA IR.
1196 void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
1197   moveTo(What, Where->getBlock(), ++Where->getIterator());
1198 }
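
// A minimal sketch of how a caller might use moveBefore above to keep
// MemorySSA in sync when it moves instruction I directly before instruction
// Target (names hypothetical):
//
//   I->moveBefore(Target);
//   if (MemoryUseOrDef *WhatAcc = MSSA->getMemoryAccess(I))
//     if (MemoryUseOrDef *WhereAcc = MSSA->getMemoryAccess(Target))
//       MSSAU.moveBefore(WhatAcc, WhereAcc);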
1199 
1200 void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
1201                                    MemorySSA::InsertionPlace Where) {
1202   if (Where != MemorySSA::InsertionPlace::BeforeTerminator)
1203     return moveTo(What, BB, Where);
1204 
1205   if (auto *Where = MSSA->getMemoryAccess(BB->getTerminator()))
1206     return moveBefore(What, Where);
1207   else
1208     return moveTo(What, BB, MemorySSA::InsertionPlace::End);
1209 }
1210 
1211 // All accesses in To used to be in From. Move to end and update access lists.
1212 void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
1213                                        Instruction *Start) {
1214 
1215   MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
1216   if (!Accs)
1217     return;
1218 
1219   assert(Start->getParent() == To && "Incorrect Start instruction");
1220   MemoryAccess *FirstInNew = nullptr;
1221   for (Instruction &I : make_range(Start->getIterator(), To->end()))
1222     if ((FirstInNew = MSSA->getMemoryAccess(&I)))
1223       break;
1224   if (FirstInNew) {
1225     auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
1226     do {
1227       auto NextIt = ++MUD->getIterator();
1228       MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
1229                                     ? nullptr
1230                                     : cast<MemoryUseOrDef>(&*NextIt);
1231       MSSA->moveTo(MUD, To, MemorySSA::End);
1232       // Moving MUD from Accs in the moveTo above may delete Accs, so we need
1233       // to retrieve it again.
1234       Accs = MSSA->getWritableBlockAccesses(From);
1235       MUD = NextMUD;
1236     } while (MUD);
1237   }
1238 
1239   // If all accesses were moved and only a trivial Phi remains, we try to remove
1240   // that Phi. This is needed when From is going to be deleted.
1241   auto *Defs = MSSA->getWritableBlockDefs(From);
1242   if (Defs && !Defs->empty())
1243     if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
1244       tryRemoveTrivialPhi(Phi);
1245 }
1246 
1247 void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
1248                                                 BasicBlock *To,
1249                                                 Instruction *Start) {
1250   assert(MSSA->getBlockAccesses(To) == nullptr &&
1251          "To block is expected to be free of MemoryAccesses.");
1252   moveAllAccesses(From, To, Start);
1253   for (BasicBlock *Succ : successors(To))
1254     if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
1255       MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
1256 }
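// Illustrative caller pattern (names here are hypothetical): a pass that has
// just spliced the instructions from StartInst onward out of From and into To
// (which must not yet have any MemoryAccesses, per the assert above) would
// follow up with
//   MSSAU.moveAllAfterSpliceBlocks(From, To, StartInst);
// so that the corresponding MemoryAccesses follow their instructions and the
// MemoryPhis in To's successors are rewired from From to To.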
1257 
1258 void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
1259                                                Instruction *Start) {
1260   assert(From->getUniquePredecessor() == To &&
1261          "From block is expected to have a single predecessor (To).");
1262   moveAllAccesses(From, To, Start);
1263   for (BasicBlock *Succ : successors(From))
1264     if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
1265       MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
1266 }
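// Illustrative caller pattern (hypothetical names): when a pass folds From
// into its unique predecessor To and has already moved From's instructions
// (the first moved instruction being StartInst), it would call
//   MSSAU.moveAllAfterMergeBlocks(From, To, StartInst);
// before erasing From, so that From's remaining accesses move into To and the
// MemoryPhis in From's successors take their incoming values from To instead.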
1267 
1268 void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
1269     BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
1270     bool IdenticalEdgesWereMerged) {
1271   assert(!MSSA->getWritableBlockAccesses(New) &&
1272          "Access list should be null for a new block.");
1273   MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
1274   if (!Phi)
1275     return;
1276   if (Old->hasNPredecessors(1)) {
1277     assert(pred_size(New) == Preds.size() &&
1278            "Should have moved all predecessors.");
1279     MSSA->moveTo(Phi, New, MemorySSA::Beginning);
1280   } else {
1281     assert(!Preds.empty() && "Must be moving at least one predecessor to the "
1282                              "new immediate predecessor.");
1283     MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
1284     SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
1285     // We currently only support the case of removing a single incoming edge
1286     // when identical edges were not merged.
1287     if (!IdenticalEdgesWereMerged)
1288       assert(PredsSet.size() == Preds.size() &&
1289              "If identical edges were not merged, we cannot have duplicate "
1290              "blocks in the predecessors");
1291     Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
1292       if (PredsSet.count(B)) {
1293         NewPhi->addIncoming(MA, B);
1294         if (!IdenticalEdgesWereMerged)
1295           PredsSet.erase(B);
1296         return true;
1297       }
1298       return false;
1299     });
1300     Phi->addIncoming(NewPhi, New);
1301     tryRemoveTrivialPhi(NewPhi);
1302   }
1303 }
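// This is typically used after a CFG transform that introduces New as an
// immediate predecessor of Old, taking over the edges coming from Preds: Old's
// MemoryPhi either moves wholesale into New (when Old is left with a single
// predecessor) or is split, with the incoming values for Preds migrating to a
// new MemoryPhi in New.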
1304 
1305 void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
1306   assert(!MSSA->isLiveOnEntryDef(MA) &&
1307          "Trying to remove the live on entry def");
1308   // We can only delete phi nodes if they have no uses, or we can replace all
1309   // uses with a single definition.
1310   MemoryAccess *NewDefTarget = nullptr;
1311   if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
1312     // Note that it is sufficient to know that all edges of the phi node have
1313     // the same argument.  If they do, by the definition of dominance frontiers
1314     // (which we used to place this phi), that argument must dominate this phi,
1315     // and thus, must dominate the phi's uses, and so we will not hit the assert
1316     // below.
1317     NewDefTarget = onlySingleValue(MP);
1318     assert((NewDefTarget || MP->use_empty()) &&
1319            "We can't delete this memory phi");
1320   } else {
1321     NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
1322   }
1323 
1324   SmallSetVector<MemoryPhi *, 4> PhisToCheck;
1325 
1326   // Re-point the uses at our defining access
1327   if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
1328     // Reset optimized on users of this store, and reset the uses.
1329     // A few notes:
1330     // 1. This is a slightly modified version of RAUW to avoid walking the
1331     // uses twice here.
1332     // 2. If we wanted to be complete, we would have to reset the optimized
1333     // flags on users of phi nodes if doing the below makes a phi node have all
1334     // the same arguments. Instead, we prefer that users call removeMemoryAccess
1335     // on those phi nodes, because doing it here would be N^3.
1336     if (MA->hasValueHandle())
1337       ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
1338     // Note: We assume MemorySSA is not used in metadata since it's not really
1339     // part of the IR.
1340 
1341     assert(NewDefTarget != MA && "Going into an infinite loop");
1342     while (!MA->use_empty()) {
1343       Use &U = *MA->use_begin();
1344       if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
1345         MUD->resetOptimized();
1346       if (OptimizePhis)
1347         if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
1348           PhisToCheck.insert(MP);
1349       U.set(NewDefTarget);
1350     }
1351   }
1352 
1353   // The removeFromLists call below will destroy MA, so we can't change the
1354   // order in which we do things here.
1355   MSSA->removeFromLookups(MA);
1356   MSSA->removeFromLists(MA);
1357 
1358   // Optionally optimize Phi uses. This will recursively remove trivial phis.
1359   if (!PhisToCheck.empty()) {
1360     SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
1361                                            PhisToCheck.end()};
1362     PhisToCheck.clear();
1363 
1364     unsigned PhisSize = PhisToOptimize.size();
1365     while (PhisSize-- > 0)
1366       if (MemoryPhi *MP =
1367               cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
1368         tryRemoveTrivialPhi(MP);
1369   }
1370 }
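// Illustrative use (hypothetical names): a pass deleting a dead memory
// instruction I should detach its access before erasing the instruction, e.g.
//   if (MemoryAccess *MA = MSSA->getMemoryAccess(&I))
//     MSSAU.removeMemoryAccess(MA);
//   I.eraseFromParent();
// The Instruction overload of removeMemoryAccess performs this lookup itself;
// changeToUnreachable below relies on it.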
1371 
1372 void MemorySSAUpdater::removeBlocks(
1373     const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
1374   // First delete all uses of BB in MemoryPhis.
1375   for (BasicBlock *BB : DeadBlocks) {
1376     Instruction *TI = BB->getTerminator();
1377     assert(TI && "Basic block expected to have a terminator instruction");
1378     for (BasicBlock *Succ : successors(TI))
1379       if (!DeadBlocks.count(Succ))
1380         if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
1381           MP->unorderedDeleteIncomingBlock(BB);
1382           tryRemoveTrivialPhi(MP);
1383         }
1384     // Drop all references held by the accesses in BB
1385     if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
1386       for (MemoryAccess &MA : *Acc)
1387         MA.dropAllReferences();
1388   }
1389 
1390   // Next, delete all memory accesses in each block
1391   for (BasicBlock *BB : DeadBlocks) {
1392     MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
1393     if (!Acc)
1394       continue;
1395     for (MemoryAccess &MA : llvm::make_early_inc_range(*Acc)) {
1396       MSSA->removeFromLookups(&MA);
1397       MSSA->removeFromLists(&MA);
1398     }
1399   }
1400 }
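// Note that removeBlocks only updates MemorySSA: it still reads the dead
// blocks' terminators and successor lists above, so callers are expected to
// invoke it before actually unlinking and deleting those blocks from the
// function.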
1401 
1402 void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
1403   for (auto &VH : UpdatedPHIs)
1404     if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
1405       tryRemoveTrivialPhi(MPhi);
1406 }
1407 
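// Update MemorySSA for the case where I and every instruction after it in its
// block are about to go away because the block now ends in unreachable: drop
// their memory accesses and detach the block from its successors' MemoryPhis.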
1408 void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
1409   const BasicBlock *BB = I->getParent();
1410   // Remove memory accesses in BB for I and all following instructions.
1411   auto BBI = I->getIterator(), BBE = BB->end();
1412   // FIXME: If this becomes too expensive, iterate until the first instruction
1413   // with a memory access, then iterate over MemoryAccesses.
1414   while (BBI != BBE)
1415     removeMemoryAccess(&*(BBI++));
1416   // Update phis in BB's successors to remove BB.
1417   SmallVector<WeakVH, 16> UpdatedPHIs;
1418   for (const BasicBlock *Successor : successors(BB)) {
1419     removeDuplicatePhiEdgesBetween(BB, Successor);
1420     if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
1421       MPhi->unorderedDeleteIncomingBlock(BB);
1422       UpdatedPHIs.push_back(MPhi);
1423     }
1424   }
1425   // Optimize trivial phis.
1426   tryRemoveTrivialPhis(UpdatedPHIs);
1427 }
1428 
1429 MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
1430     Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
1431     MemorySSA::InsertionPlace Point) {
1432   MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
1433   MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
1434   return NewAccess;
1435 }
1436 
1437 MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
1438     Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
1439   assert(I->getParent() == InsertPt->getBlock() &&
1440          "New and old access must be in the same block");
1441   MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
1442   MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
1443                               InsertPt->getIterator());
1444   return NewAccess;
1445 }
1446 
1447 MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
1448     Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
1449   assert(I->getParent() == InsertPt->getBlock() &&
1450          "New and old access must be in the same block");
1451   MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
1452   MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
1453                               ++InsertPt->getIterator());
1454   return NewAccess;
1455 }
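// Illustrative use of the create* helpers (hypothetical names): when a pass
// inserts a new memory instruction NewI directly after an existing one OldI,
// it might mirror that in MemorySSA with
//   MemoryUseOrDef *OldMA = MSSA->getMemoryAccess(OldI);
//   MSSAU.createMemoryAccessAfter(NewI, /*Definition=*/OldMA, OldMA);
// assuming OldMA really is the correct defining access for NewI. These helpers
// only create and place the new access; they perform no renaming or other
// downstream fixups.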
1456