//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// The Memory SSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning": every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
///
/// As a trivial example,
/// define i32 @main() #0 {
/// entry:
///   %call = call noalias i8* @_Znwm(i64 4) #2
///   %0 = bitcast i8* %call to i32*
///   %call1 = call noalias i8* @_Znwm(i64 4) #2
///   %1 = bitcast i8* %call1 to i32*
///   store i32 5, i32* %0, align 4
///   store i32 7, i32* %1, align 4
///   %2 = load i32* %0, align 4
///   %3 = load i32* %1, align 4
///   %add = add nsw i32 %2, %3
///   ret i32 %add
/// }
///
/// Will become
/// define i32 @main() #0 {
/// entry:
///   ; 1 = MemoryDef(0)
///   %call = call noalias i8* @_Znwm(i64 4) #3
///   %2 = bitcast i8* %call to i32*
///   ; 2 = MemoryDef(1)
///   %call1 = call noalias i8* @_Znwm(i64 4) #3
///   %4 = bitcast i8* %call1 to i32*
///   ; 3 = MemoryDef(2)
///   store i32 5, i32* %2, align 4
///   ; 4 = MemoryDef(3)
///   store i32 7, i32* %4, align 4
///   ; MemoryUse(3)
///   %7 = load i32* %2, align 4
///   ; MemoryUse(4)
///   %8 = load i32* %4, align 4
///   %add = add nsw i32 %7, %8
///   ret i32 %add
/// }
///
/// Given this form, all the stores that could ever affect the load at %8 can
/// be found by taking the MemoryUse associated with it and walking from use
/// to def until you hit the top of the function.
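///
/// For example, a minimal sketch of such a walk (assuming `MSSA` is a built
/// MemorySSA instance and `LI` is the load in question; names illustrative):
///
///   MemoryAccess *MA = MSSA.getMemoryAccess(LI)->getDefiningAccess();
///   while (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
///     if (MSSA.isLiveOnEntryDef(MUD))
///       break;                          // Reached the top of the function.
///     // ... inspect the store at MUD->getMemoryInst() here ...
///     MA = MUD->getDefiningAccess();
///   }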
///
/// Each def also has a list of users associated with it, so you can walk from
/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
/// but not the RHS of MemoryDefs. You can see this above at %7, which would
/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
/// store, all the MemoryUses on its use lists are may-aliases of that store
/// (but the MemoryDefs on its use list may not be).
///
/// MemoryDefs are not disambiguated because it would require multiple reaching
/// definitions, which would require multiple phis, and multiple memoryaccesses
/// per instruction.
///
/// In addition to the def/use graph described above, MemoryDefs also contain
/// an "optimized" definition use.  The "optimized" use points to some def
/// reachable through the memory def chain.  The optimized def *may* (but is
/// not required to) alias the original MemoryDef, but no def *closer* to the
/// source def may alias it.  As the name implies, the purpose of the optimized
/// use is to allow caching of clobber searches for memory defs.  The optimized
/// def may be nullptr, in which case clients must walk the defining access
/// chain.
///
/// When iterating the uses of a MemoryDef, both defining uses and optimized
/// uses will be encountered.  If only one type is needed, the client must
/// filter the use walk.
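///
/// As a sketch of such filtering (relying on the operand layout defined by
/// MemoryDef below: operand 0 is the defining access, operand 1 the
/// optimized access; `MD` is an illustrative MemoryDef *):
///
///   for (Use &U : MD->uses()) {
///     auto *UserDef = dyn_cast<MemoryDef>(U.getUser());
///     bool IsOptimizedUse = UserDef && U.getOperandNo() == 1;
///     // Skip or keep U depending on which kind of use is wanted.
///   }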
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYSSA_H
#define LLVM_ANALYSIS_MEMORYSSA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Pass.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>

namespace llvm {

template <class GraphType> struct GraphTraits;
class Function;
class Loop;
class LLVMContext;
class MemoryAccess;
class MemorySSAWalker;
class Module;
class raw_ostream;

namespace MSSAHelpers {

struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers

enum : unsigned {
  // Used to signify what the default invalid ID is for MemoryAccess's
  // getID()
  INVALID_MEMORYACCESS_ID = -1U
};

template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
    memoryaccess_def_iterator_base<const MemoryAccess>;
137 
138 // The base for all memory accesses. All memory accesses in a block are
139 // linked together using an intrusive list.
140 class MemoryAccess
141     : public DerivedUser,
142       public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
143       public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
144 public:
145   using AllAccessType =
146       ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
147   using DefsOnlyType =
148       ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
149 
150   MemoryAccess(const MemoryAccess &) = delete;
151   MemoryAccess &operator=(const MemoryAccess &) = delete;
152 
153   void *operator new(size_t) = delete;
154 
155   // Methods for support type inquiry through isa, cast, and
156   // dyn_cast
classof(const Value * V)157   static bool classof(const Value *V) {
158     unsigned ID = V->getValueID();
159     return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
160   }
161 
getBlock()162   BasicBlock *getBlock() const { return Block; }
163 
164   LLVM_ABI void print(raw_ostream &OS) const;
165   LLVM_ABI void dump() const;
166 
167   /// The user iterators for a memory access
168   using iterator = user_iterator;
169   using const_iterator = const_user_iterator;
170 
171   /// This iterator walks over all of the defs in a given
172   /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
173   /// MemoryUse/MemoryDef, this walks the defining access.
174   memoryaccess_def_iterator defs_begin();
175   const_memoryaccess_def_iterator defs_begin() const;
176   memoryaccess_def_iterator defs_end();
177   const_memoryaccess_def_iterator defs_end() const;
178 
179   /// Get the iterators for the all access list and the defs only list
180   /// We default to the all access list.
getIterator()181   AllAccessType::self_iterator getIterator() {
182     return this->AllAccessType::getIterator();
183   }
getIterator()184   AllAccessType::const_self_iterator getIterator() const {
185     return this->AllAccessType::getIterator();
186   }
getReverseIterator()187   AllAccessType::reverse_self_iterator getReverseIterator() {
188     return this->AllAccessType::getReverseIterator();
189   }
getReverseIterator()190   AllAccessType::const_reverse_self_iterator getReverseIterator() const {
191     return this->AllAccessType::getReverseIterator();
192   }
getDefsIterator()193   DefsOnlyType::self_iterator getDefsIterator() {
194     return this->DefsOnlyType::getIterator();
195   }
getDefsIterator()196   DefsOnlyType::const_self_iterator getDefsIterator() const {
197     return this->DefsOnlyType::getIterator();
198   }
getReverseDefsIterator()199   DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
200     return this->DefsOnlyType::getReverseIterator();
201   }
getReverseDefsIterator()202   DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
203     return this->DefsOnlyType::getReverseIterator();
204   }
205 
206 protected:
207   friend class MemoryDef;
208   friend class MemoryPhi;
209   friend class MemorySSA;
210   friend class MemoryUse;
211   friend class MemoryUseOrDef;
212 
213   /// Used by MemorySSA to change the block of a MemoryAccess when it is
214   /// moved.
setBlock(BasicBlock * BB)215   void setBlock(BasicBlock *BB) { Block = BB; }
216 
217   /// Used for debugging and tracking things about MemoryAccesses.
218   /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
219   inline unsigned getID() const;
220 
MemoryAccess(LLVMContext & C,unsigned Vty,DeleteValueTy DeleteValue,BasicBlock * BB,AllocInfo AllocInfo)221   MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
222                BasicBlock *BB, AllocInfo AllocInfo)
223       : DerivedUser(Type::getVoidTy(C), Vty, AllocInfo, DeleteValue),
224         Block(BB) {}
225 
226   // Use deleteValue() to delete a generic MemoryAccess.
227   ~MemoryAccess() = default;
228 
229 private:
230   BasicBlock *Block;
231 };

template <>
struct ilist_alloc_traits<MemoryAccess> {
  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
};

inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  MA.print(OS);
  return OS;
}

/// Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
///
/// This class should never be instantiated directly; make a MemoryUse or
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
  void *operator new(size_t) = delete;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  /// Get the instruction that this MemoryUse represents.
  Instruction *getMemoryInst() const { return MemoryInstruction; }

  /// Get the access that produces the memory state used by this Use.
  MemoryAccess *getDefiningAccess() const { return getOperand(0); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
  }

  /// Do we have an optimized use?
  inline bool isOptimized() const;
  /// Return the MemoryAccess associated with the optimized use, or nullptr.
  inline MemoryAccess *getOptimized() const;
  /// Sets the optimized use for a MemoryDef.
  inline void setOptimized(MemoryAccess *);

  /// Reset the ID of what this MemoryUse was optimized to, causing it to
  /// be rewalked by the walker if necessary.
  /// This really should only be called by tests.
  inline void resetOptimized();

protected:
  friend class MemorySSA;
  friend class MemorySSAUpdater;

  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
                 AllocInfo AllocInfo)
      : MemoryAccess(C, Vty, DeleteValue, BB, AllocInfo),
        MemoryInstruction(MI) {
    setDefiningAccess(DMA);
  }

  // Use deleteValue() to delete a generic MemoryUseOrDef.
  ~MemoryUseOrDef() = default;

  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
    if (!Optimized) {
      setOperand(0, DMA);
      return;
    }
    setOptimized(DMA);
  }

private:
  Instruction *MemoryInstruction;
};

/// Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {
  constexpr static IntrusiveOperandsAllocMarker AllocMarker{1};

public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB, AllocMarker) {}

  // allocate space for exactly one operand
  void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal;
  }

  LLVM_ABI void print(raw_ostream &OS) const;

  void setOptimized(MemoryAccess *DMA) {
    OptimizedID = DMA->getID();
    setOperand(0, DMA);
  }

  /// Whether the MemoryUse is optimized. If ensureOptimizedUses() was called,
  /// uses will usually be optimized, but this is not guaranteed (e.g. due to
  /// invalidation and optimization limits).
  bool isOptimized() const {
    return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    return getDefiningAccess();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

protected:
  friend class MemorySSA;

private:
  static void deleteMe(DerivedUser *Self);

  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)

/// Represents a read-write access to memory, whether it is a must-alias,
/// or a may-alias.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryDef's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
/// Note that, in order to provide def-def chains, all defs also have a use
/// associated with them. This use points to the nearest reaching
/// MemoryDef/MemoryPhi.
class MemoryDef final : public MemoryUseOrDef {
  constexpr static IntrusiveOperandsAllocMarker AllocMarker{2};

public:
  friend class MemorySSA;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
            unsigned Ver)
      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB, AllocMarker),
        ID(Ver) {}

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, AllocMarker); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryDefVal;
  }

  void setOptimized(MemoryAccess *MA) {
    setOperand(1, MA);
    OptimizedID = MA->getID();
  }

  MemoryAccess *getOptimized() const {
    return cast_or_null<MemoryAccess>(getOperand(1));
  }

  bool isOptimized() const {
    return getOptimized() && OptimizedID == getOptimized()->getID();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
    setOperand(1, nullptr);
  }

  LLVM_ABI void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

private:
  static void deleteMe(DerivedUser *Self);

  const unsigned ID;
  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)

template <>
struct OperandTraits<MemoryUseOrDef> {
  static Use *op_begin(MemoryUseOrDef *MUD) {
    if (auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::op_begin(MU);
    return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
  }

  static Use *op_end(MemoryUseOrDef *MUD) {
    if (auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::op_end(MU);
    return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
  }

  static unsigned operands(const MemoryUseOrDef *MUD) {
    if (const auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::operands(MU);
    return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
  }
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)

/// Represents phi nodes for memory accesses.
///
/// These have the same semantics as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
/// Guaranteeing one phi per block means guaranteeing there is only ever one
/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
/// a MemoryPhi's operands.
/// That is, given
/// if (a) {
///   store %a
///   store %b
/// }
/// it *must* be transformed into
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(1)
///    store %b
/// }
/// and *not*
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(liveOnEntry)
///    store %b
/// }
/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
/// end of the branch, and if there are not two phi nodes, one will be
/// disconnected completely from the SSA graph below that point.
/// Because MemoryUse's do not generate new definitions, they do not have this
/// issue.
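///
/// For example (a sketch; `MSSA` and `BB` are illustrative names), a block's
/// MemoryPhi and its incoming values can be inspected with:
///
///   if (MemoryPhi *MP = MSSA.getMemoryAccess(BB))
///     for (unsigned I = 0, E = MP->getNumIncomingValues(); I != E; ++I)
///       visit(MP->getIncomingBlock(I),
///             MP->getIncomingValue(I)); // visit(): any client callback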
class MemoryPhi final : public MemoryAccess {
  constexpr static HungOffOperandsAllocMarker AllocMarker{};

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, AllocMarker); }

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
      : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, AllocMarker), ID(Ver),
        ReservedSpace(NumPreds) {
    allocHungoffUses(ReservedSpace);
  }

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock *const *;

  block_iterator block_begin() {
    return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
  }

  const_block_iterator block_begin() const {
    return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
  }

  block_iterator block_end() { return block_begin() + getNumOperands(); }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number \p I
  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
  void setIncomingValue(unsigned I, MemoryAccess *V) {
    assert(V && "PHI node got a null value!");
    setOperand(I, V);
  }

  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }

  /// Return incoming basic block number @p I.
  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned I, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[I] = BB;
  }

  /// Add an incoming value to the end of the PHI list
  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI.  Returns -1 if no instance.
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (block_begin()[I] == BB)
        return I;
    return -1;
  }

  MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  // After deleting incoming position I, the order of incoming may be changed.
  void unorderedDeleteIncoming(unsigned I) {
    unsigned E = getNumOperands();
    assert(I < E && "Cannot remove out of bounds Phi entry.");
    // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
    // itself should be deleted.
    assert(E >= 2 && "Can only remove incoming values from MemoryPhis with "
                     "at least 2 values.");
    setIncomingValue(I, getIncomingValue(E - 1));
    setIncomingBlock(I, block_begin()[E - 1]);
    setOperand(E - 1, nullptr);
    block_begin()[E - 1] = nullptr;
    setNumHungOffUseOperands(getNumOperands() - 1);
  }

  // After deleting entries that satisfy Pred, remaining entries may have
  // changed order.
  template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
        unorderedDeleteIncoming(I);
        E = getNumOperands();
        --I;
      }
    assert(getNumOperands() >= 1 &&
           "Cannot remove all incoming blocks in a MemoryPhi.");
  }

  // After deleting incoming block BB, the incoming blocks order may be changed.
  void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
  }

  // After deleting incoming memory access MA, the incoming accesses order may
  // be changed.
  void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
  }

  static bool classof(const Value *V) {
    return V->getValueID() == MemoryPhiVal;
  }

  LLVM_ABI void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

protected:
  friend class MemorySSA;

  /// This is more complicated than the generic
  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  /// values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

private:
  // For debugging only
  const unsigned ID;
  unsigned ReservedSpace;

  /// This grows the operand list in response to a push_back style of
  /// operation.  This grows the number of ops by 1.5 times.
  void growOperands() {
    unsigned E = getNumOperands();
    // 2 op PHI nodes are VERY common, so reserve at least enough for that.
    ReservedSpace = std::max(E + E / 2, 2u);
    growHungoffUses(ReservedSpace, /* IsPhi */ true);
  }

  static void deleteMe(DerivedUser *Self);
};

inline unsigned MemoryAccess::getID() const {
  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
         "only memory defs and phis have ids");
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getID();
  return cast<MemoryPhi>(this)->getID();
}

inline bool MemoryUseOrDef::isOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->isOptimized();
  return cast<MemoryUse>(this)->isOptimized();
}

inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getOptimized();
  return cast<MemoryUse>(this)->getOptimized();
}

inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->setOptimized(MA);
  else
    cast<MemoryUse>(this)->setOptimized(MA);
}

inline void MemoryUseOrDef::resetOptimized() {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->resetOptimized();
  else
    cast<MemoryUse>(this)->resetOptimized();
}

template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)

/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
  LLVM_ABI MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
  LLVM_ABI MemorySSA(Loop &, AliasAnalysis *, DominatorTree *);

  // MemorySSA must remain where it's constructed; Walkers it creates store
  // pointers to it.
  MemorySSA(MemorySSA &&) = delete;

  LLVM_ABI ~MemorySSA();

  LLVM_ABI MemorySSAWalker *getWalker();
  LLVM_ABI MemorySSAWalker *getSkipSelfWalker();

  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it. If passed a basic block, gets the memory phi
  /// node that exists for that block, if there is one. Otherwise, this will get
  /// a MemoryUseOrDef.
  MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
    return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
  }

  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
    return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
  }

  DominatorTree &getDomTree() const { return *DT; }

  LLVM_ABI void dump() const;
  LLVM_ABI void print(raw_ostream &) const;

  /// Return true if \p MA represents the live on entry value
  ///
  /// Loads and stores from pointer arguments and other global values may be
  /// defined by memory operations that do not occur in the current function, so
  /// they may be live on entry to the function. MemorySSA represents such
  /// memory state by the live on entry definition, which is guaranteed to occur
  /// before any other memory access in the function.
  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    return MA == LiveOnEntryDef.get();
  }

  inline MemoryAccess *getLiveOnEntryDef() const {
    return LiveOnEntryDef.get();
  }

  // Sadly, iplists, by default, own and delete pointers added to the
  // list. It's not currently possible to have two iplists for the same type,
  // where one owns the pointers, and one does not. This is because the traits
  // are per-type, not per-tag.  If this ever changes, we should make the
  // DefList an iplist.
  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsList =
      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  /// Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    return getWritableBlockAccesses(BB);
  }

  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
  /// block.
  ///
  /// This list is not modifiable by the user.
  const DefsList *getBlockDefs(const BasicBlock *BB) const {
    return getWritableBlockDefs(BB);
  }

  /// Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  LLVM_ABI bool locallyDominates(const MemoryAccess *A,
                                 const MemoryAccess *B) const;

  /// Given two memory accesses in potentially different blocks,
  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  LLVM_ABI bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  /// dominates Use \p B.
  LLVM_ABI bool dominates(const MemoryAccess *A, const Use &B) const;

  enum class VerificationLevel { Fast, Full };
  /// Verify that MemorySSA is self consistent (IE definitions dominate
  /// all uses, uses appear in the right places).  This is used by unit tests.
  LLVM_ABI void
      verifyMemorySSA(VerificationLevel = VerificationLevel::Fast) const;

  /// Used in various insertion functions to specify whether we are talking
  /// about the beginning or end of a block.
  enum InsertionPlace { Beginning, End, BeforeTerminator };

  /// By default, uses are *not* optimized during MemorySSA construction.
  /// Calling this method will attempt to optimize all MemoryUses, if this has
  /// not happened yet for this MemorySSA instance. This should be done if you
  /// plan to query the clobbering access for most uses, or if you walk the
  /// def-use chain of uses.
  LLVM_ABI void ensureOptimizedUses();

  AliasAnalysis &getAA() { return *AA; }

protected:
  // Used by Memory SSA dumpers and wrapper pass
  friend class MemorySSAUpdater;

  template <typename IterT>
  void verifyOrderingDominationAndDefUses(
      IterT Blocks, VerificationLevel = VerificationLevel::Fast) const;
  template <typename IterT> void verifyDominationNumbers(IterT Blocks) const;
  template <typename IterT> void verifyPrevDefInPhis(IterT Blocks) const;

  // This is used by the use optimizer and updater.
  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  // This is used by the use optimizer and updater.
  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    auto It = PerBlockDefs.find(BB);
    return It == PerBlockDefs.end() ? nullptr : It->second.get();
  }

  // These are used by the updater to perform various internal MemorySSA
  // machinations.  They do not always leave the IR in a correct state, and
  // rely on the updater to fix up what they break, so they are not public.

  LLVM_ABI void moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where);
  LLVM_ABI void moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point);

  // Rename the dominator tree branch rooted at BB.
  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited) {
    renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
  }

  LLVM_ABI void removeFromLookups(MemoryAccess *);
  LLVM_ABI void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
  LLVM_ABI void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
                                        InsertionPlace);
  LLVM_ABI void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
                                      AccessList::iterator);
  LLVM_ABI MemoryUseOrDef *
  createDefinedAccess(Instruction *, MemoryAccess *,
                      const MemoryUseOrDef *Template = nullptr,
                      bool CreationMustSucceed = true);

private:
  class ClobberWalkerBase;
  class CachingWalker;
  class SkipSelfWalker;
  class OptimizeUses;

  CachingWalker *getWalkerImpl();
  template <typename IterT>
  void buildMemorySSA(BatchAAResults &BAA, IterT Blocks);

  void prepareForMoveTo(MemoryAccess *, BasicBlock *);
  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;

  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;

  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  MemoryPhi *createMemoryPhi(BasicBlock *BB);
  template <typename AliasAnalysisType>
  MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
                                  const MemoryUseOrDef *Template = nullptr);
  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
  LLVM_ABI void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited = false,
                           bool RenameAllUses = false);
  AccessList *getOrCreateAccessList(const BasicBlock *);
  DefsList *getOrCreateDefsList(const BasicBlock *);
  void renumberBlock(const BasicBlock *) const;
  AliasAnalysis *AA = nullptr;
  DominatorTree *DT;
  Function *F = nullptr;
  Loop *L = nullptr;

  // Memory SSA mappings
  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;

  // These two mappings contain the main block to access/def mappings for
  // MemorySSA. The list contained in PerBlockAccesses really owns all the
  // MemoryAccesses.
  // Both maps maintain the invariant that if a block is found in them, the
  // corresponding list is not empty, and if a block is not found in them, the
  // corresponding list is empty.
  AccessMap PerBlockAccesses;
  DefsMap PerBlockDefs;
  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;

  // Domination mappings
  // Note that the numbering is local to a block, even though the map is
  // global.
  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;

  // Memory SSA building info
  std::unique_ptr<ClobberWalkerBase> WalkerBase;
  std::unique_ptr<CachingWalker> Walker;
  std::unique_ptr<SkipSelfWalker> SkipWalker;
  unsigned NextID = 0;
  bool IsOptimized = false;
};

/// Enables verification of MemorySSA.
///
/// The checks which this flag enables are expensive and disabled by default
/// unless `EXPENSIVE_CHECKS` is defined.  The flag `-verify-memoryssa` can be
/// used to selectively enable the verification without re-compilation.
LLVM_ABI extern bool VerifyMemorySSA;

// Internal MemorySSA utils, for use by MemorySSA classes and walkers
class MemorySSAUtil {
protected:
  friend class GVNHoist;
  friend class MemorySSAWalker;

  // This function should not be used by new passes.
  LLVM_ABI static bool defClobbersUseOrDef(MemoryDef *MD,
                                           const MemoryUseOrDef *MU,
                                           AliasAnalysis &AA);
};

/// An analysis that produces \c MemorySSA for a function.
///
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
  friend AnalysisInfoMixin<MemorySSAAnalysis>;

  LLVM_ABI static AnalysisKey Key;

public:
  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
  // pointers after construction.  Use a wrapper class instead of plain
  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
  struct Result {
    Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}

    MemorySSA &getMSSA() { return *MSSA; }

    std::unique_ptr<MemorySSA> MSSA;

    LLVM_ABI bool invalidate(Function &F, const PreservedAnalyses &PA,
                             FunctionAnalysisManager::Invalidator &Inv);
  };

  LLVM_ABI Result run(Function &F, FunctionAnalysisManager &AM);
};
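
// Example (a sketch): under the new pass manager, a transform pass obtains
// the analysis with
//   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
// where `AM` is its FunctionAnalysisManager and `F` the current Function.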

/// Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  raw_ostream &OS;
  bool EnsureOptimizedUses;

public:
  explicit MemorySSAPrinterPass(raw_ostream &OS, bool EnsureOptimizedUses)
      : OS(OS), EnsureOptimizedUses(EnsureOptimizedUses) {}

  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  static bool isRequired() { return true; }
};

/// Printer pass for \c MemorySSA via the walker.
class MemorySSAWalkerPrinterPass
    : public PassInfoMixin<MemorySSAWalkerPrinterPass> {
  raw_ostream &OS;

public:
  explicit MemorySSAWalkerPrinterPass(raw_ostream &OS) : OS(OS) {}

  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  static bool isRequired() { return true; }
};

/// Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  LLVM_ABI PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  static bool isRequired() { return true; }
};

/// Legacy analysis pass which computes \c MemorySSA.
class LLVM_ABI MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  static char ID;

  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};
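
// Example (a sketch): a legacy pass declares its dependency in
// getAnalysisUsage via AU.addRequired<MemorySSAWrapperPass>() and then, in
// runOnFunction, fetches the result with
//   MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();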

/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to be able to further disambiguate the def-use chains
/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
/// you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API enables a
/// standardized interface to getting and using that info.
class MemorySSAWalker {
public:
  LLVM_ABI MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I,
                                          BatchAAResults &AA) {
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA, AA);
  }

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  BatchAAResults &AA) = 0;

  /// Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &,
                                                  BatchAAResults &AA) = 0;

  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    BatchAAResults BAA(MSSA->getAA());
    return getClobberingMemoryAccess(I, BAA);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) {
    BatchAAResults BAA(MSSA->getAA());
    return getClobberingMemoryAccess(MA, BAA);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) {
    BatchAAResults BAA(MSSA->getAA());
    return getClobberingMemoryAccess(MA, Loc, BAA);
  }
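
  // Example (a sketch): given a load or store instruction `I` known to
  // MemorySSA, the nearest dominating clobber can be queried with
  //   MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(I);
  // `MSSA` and `I` are illustrative names.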

  /// Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation.  This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};

/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class LLVM_ABI DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  // Keep the overrides below from hiding the Instruction overload of
  // getClobberingMemoryAccess.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          BatchAAResults &) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &,
                                          BatchAAResults &) override;
};

using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {
  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;

public:
  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
  memoryaccess_def_iterator_base() = default;

  bool operator==(const memoryaccess_def_iterator_base &Other) const {
    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  }

  // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
  // block from the operand in constant time (In a PHINode, the uselist has
  // both, so it's just subtraction). We provide it as part of the
  // iterator to avoid callers having to linear walk to get the block.
  // If the operation becomes constant time on MemoryPHI's, this bit of
  // abstraction breaking should be removed.
  BasicBlock *getPhiArgBlock() const {
    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    return MP->getIncomingBlock(ArgNo);
  }

  typename std::iterator_traits<BaseT>::pointer operator*() const {
    assert(Access && "Tried to access past the end of our iterator");
    // Go to the first argument for phis, and the defining access for everything
    // else.
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
      return MP->getIncomingValue(ArgNo);
    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  }

  using BaseT::operator++;
  memoryaccess_def_iterator_base &operator++() {
    assert(Access && "Hit end of iterator");
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
      if (++ArgNo >= MP->getNumIncomingValues()) {
        ArgNo = 0;
        Access = nullptr;
      }
    } else {
      Access = nullptr;
    }
    return *this;
  }

private:
  T *Access = nullptr;
  unsigned ArgNo = 0;
};

inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}

/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = memoryaccess_def_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
};

template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = MemoryAccess::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};

/// Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///
/// This iterator, while somewhat specialized, is what most clients actually
/// want when walking upwards through MemorySSA def chains. It takes a pair of
/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
/// memory location through phi nodes for the user.
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first), DT(DT) {
    CurrentPair.first = nullptr;

    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  upward_defs_iterator() { CurrentPair.first = nullptr; }

  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  typename std::iterator_traits<BaseT>::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
  /// loop. In particular, this guarantees that it only references a single
  /// MemoryLocation during execution of the containing function.
  LLVM_ABI bool IsGuaranteedLoopInvariant(const Value *Ptr) const;

  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    CurrentPair.second = Location;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getDataLayout(), nullptr);

      if (Value *Addr =
              Translator.translateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), DT, true))
        if (Addr != CurrentPair.second.Ptr)
          CurrentPair.second = CurrentPair.second.getWithNewPtr(Addr);

      // Mark size as unknown, if the location is not guaranteed to be
      // loop-invariant for any possible loop in the function. Setting the size
      // to unknown guarantees that any memory accesses that access locations
      // after the pointer are considered as clobbers, which is important to
      // catch loop carried dependences.
      if (!IsGuaranteedLoopInvariant(CurrentPair.second.Ptr))
        CurrentPair.second = CurrentPair.second.getWithNewSize(
            LocationSize::beforeOrAfterPointer());
    }
  }

  MemoryAccessPair CurrentPair;
  memoryaccess_def_iterator DefIterator;
  MemoryLocation Location;
  MemoryAccess *OriginalAccess = nullptr;
  DominatorTree *DT = nullptr;
  bool WalkingPhi = false;
};

inline upward_defs_iterator
upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT) {
  return upward_defs_iterator(Pair, &DT);
}

inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }

inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {
  return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
}
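
// Example (a sketch): walk <access, location> pairs upwards from access `MA`
// with location `Loc`, translating `Loc` through any MemoryPhi encountered:
//   for (const MemoryAccessPair &P : upward_defs({MA, Loc}, DT))
//     visit(P.first, P.second); // visit(): any client callback
// `MA`, `Loc`, `DT`, and visit() are illustrative names.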

/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
/// comparing against a null def_chain_iterator, this will compare equal only
/// after walking said Phi/liveOnEntry.
///
/// The UseOptimizedChain flag specifies whether to walk the clobbering
/// access chain, or all the accesses.
///
/// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
/// a phi node.  The optimized chain walks the clobbering access of a store.
/// So if you are just trying to find, given a store, what the next
/// thing that would clobber the same memory is, you want the optimized chain.
template <class T, bool UseOptimizedChain = false>
struct def_chain_iterator
    : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
                                  std::forward_iterator_tag, MemoryAccess *> {
  def_chain_iterator() : MA(nullptr) {}
  def_chain_iterator(T MA) : MA(MA) {}

  T operator*() const { return MA; }

  def_chain_iterator &operator++() {
    // N.B. liveOnEntry has a null defining access.
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
      if (UseOptimizedChain && MUD->isOptimized())
        MA = MUD->getOptimized();
      else
        MA = MUD->getDefiningAccess();
    } else {
      MA = nullptr;
    }

    return *this;
  }

  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }

private:
  T MA;
};

template <class T>
inline iterator_range<def_chain_iterator<T>>
def_chain(T MA, MemoryAccess *UpTo = nullptr) {
#ifdef EXPENSIVE_CHECKS
  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
         "UpTo isn't in the def chain!");
#endif
  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
}
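
// Example (a sketch): enumerate the def chain above an access. `Start` is an
// illustrative MemoryAccess * (using MemoryAccess * as T lets operator++
// store the result of getDefiningAccess()); the walk yields `Start` itself
// and ends after the first MemoryPhi or liveOnEntry reached:
//   MemoryAccess *Start = MSSA.getMemoryAccess(SomeStore);
//   for (MemoryAccess *MA : def_chain(Start))
//     visit(MA); // visit(): any client callback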

template <class T>
inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
  return make_range(def_chain_iterator<T, true>(MA),
                    def_chain_iterator<T, true>(nullptr));
}

} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYSSA_H