xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/Scalar/NewGVN.cpp (revision 5ca8e32633c4ffbbcd6762e5888b6a4ba0708c6c)
1 //===- NewGVN.cpp - Global Value Numbering Pass ---------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file implements LLVM's new Global Value Numbering pass.
11 /// GVN partitions values computed by a function into congruence classes.
12 /// Values ending up in the same congruence class are guaranteed to be the same
13 /// for every execution of the program. In that respect, congruency is a
14 /// compile-time approximation of equivalence of values at runtime.
15 /// The algorithm implemented here uses a sparse formulation and it's based
16 /// on the ideas described in the paper:
17 /// "A Sparse Algorithm for Predicated Global Value Numbering" from
18 /// Karthik Gargi.
19 ///
20 /// A brief overview of the algorithm: The algorithm is essentially the same as
21 /// the standard RPO value numbering algorithm (a good reference is the paper
22 /// "SCC based value numbering" by L. Taylor Simpson) with one major difference:
23 /// The RPO algorithm proceeds, on every iteration, to process every reachable
24 /// block and every instruction in that block.  This is because the standard RPO
25 /// algorithm does not track what things have the same value number, it only
26 /// tracks what the value number of a given operation is (the mapping is
27 /// operation -> value number).  Thus, when a value number of an operation
28 /// changes, it must reprocess everything to ensure all uses of a value number
29 /// get updated properly.  In contrast, the sparse algorithm we use *also*
30 /// tracks what operations have a given value number (IE it also tracks the
31 /// reverse mapping from value number -> operations with that value number), so
32 /// that it only needs to reprocess the instructions that are affected when
33 /// something's value number changes.  The vast majority of complexity and code
34 /// in this file is devoted to tracking what value numbers could change for what
35 /// instructions when various things happen.  The rest of the algorithm is
36 /// devoted to performing symbolic evaluation, forward propagation, and
37 /// simplification of operations based on the value numbers deduced so far.
38 ///
39 /// In order to make the GVN mostly-complete, we use a technique derived from
40 /// "Detection of Redundant Expressions: A Complete and Polynomial-time
41 /// Algorithm in SSA" by R.R. Pai.  The source of incompleteness in most SSA
42 /// based GVN algorithms is related to their inability to detect equivalence
43 /// between phi of ops (IE phi(a+b, c+d)) and op of phis (phi(a,c) + phi(b, d)).
44 /// We resolve this issue by generating the equivalent "phi of ops" form for
45 /// each op of phis we see, in a way that only takes polynomial time to resolve.
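///
/// As an illustration (with made-up value names), the "op of phis"
///
///   %p = phi i32 [ %a, %bb1 ], [ %c, %bb2 ]
///   %q = phi i32 [ %b, %bb1 ], [ %d, %bb2 ]
///   %r = add i32 %p, %q
///
/// is value numbered together with the corresponding "phi of ops" form
///
///   %ab = add i32 %a, %b        ; in terms of values available in %bb1
///   %cd = add i32 %c, %d        ; in terms of values available in %bb2
///   %r2 = phi i32 [ %ab, %bb1 ], [ %cd, %bb2 ]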
46 ///
47 /// We also do not perform elimination by using any published algorithm.  All
48 /// published algorithms are O(Instructions). Instead, we use a technique that
49 /// is O(number of operations with the same value number), enabling us to skip
50 /// trying to eliminate things that have unique value numbers.
51 //
52 //===----------------------------------------------------------------------===//
53 
54 #include "llvm/Transforms/Scalar/NewGVN.h"
55 #include "llvm/ADT/ArrayRef.h"
56 #include "llvm/ADT/BitVector.h"
57 #include "llvm/ADT/DenseMap.h"
58 #include "llvm/ADT/DenseMapInfo.h"
59 #include "llvm/ADT/DenseSet.h"
60 #include "llvm/ADT/DepthFirstIterator.h"
61 #include "llvm/ADT/GraphTraits.h"
62 #include "llvm/ADT/Hashing.h"
63 #include "llvm/ADT/PointerIntPair.h"
64 #include "llvm/ADT/PostOrderIterator.h"
65 #include "llvm/ADT/SetOperations.h"
66 #include "llvm/ADT/SmallPtrSet.h"
67 #include "llvm/ADT/SmallVector.h"
68 #include "llvm/ADT/SparseBitVector.h"
69 #include "llvm/ADT/Statistic.h"
70 #include "llvm/ADT/iterator_range.h"
71 #include "llvm/Analysis/AliasAnalysis.h"
72 #include "llvm/Analysis/AssumptionCache.h"
73 #include "llvm/Analysis/CFGPrinter.h"
74 #include "llvm/Analysis/ConstantFolding.h"
75 #include "llvm/Analysis/GlobalsModRef.h"
76 #include "llvm/Analysis/InstructionSimplify.h"
77 #include "llvm/Analysis/MemoryBuiltins.h"
78 #include "llvm/Analysis/MemorySSA.h"
79 #include "llvm/Analysis/TargetLibraryInfo.h"
80 #include "llvm/Analysis/ValueTracking.h"
81 #include "llvm/IR/Argument.h"
82 #include "llvm/IR/BasicBlock.h"
83 #include "llvm/IR/Constant.h"
84 #include "llvm/IR/Constants.h"
85 #include "llvm/IR/Dominators.h"
86 #include "llvm/IR/Function.h"
87 #include "llvm/IR/InstrTypes.h"
88 #include "llvm/IR/Instruction.h"
89 #include "llvm/IR/Instructions.h"
90 #include "llvm/IR/IntrinsicInst.h"
91 #include "llvm/IR/PatternMatch.h"
92 #include "llvm/IR/Type.h"
93 #include "llvm/IR/Use.h"
94 #include "llvm/IR/User.h"
95 #include "llvm/IR/Value.h"
96 #include "llvm/Support/Allocator.h"
97 #include "llvm/Support/ArrayRecycler.h"
98 #include "llvm/Support/Casting.h"
99 #include "llvm/Support/CommandLine.h"
100 #include "llvm/Support/Debug.h"
101 #include "llvm/Support/DebugCounter.h"
102 #include "llvm/Support/ErrorHandling.h"
103 #include "llvm/Support/PointerLikeTypeTraits.h"
104 #include "llvm/Support/raw_ostream.h"
105 #include "llvm/Transforms/Scalar/GVNExpression.h"
106 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
107 #include "llvm/Transforms/Utils/Local.h"
108 #include "llvm/Transforms/Utils/PredicateInfo.h"
109 #include "llvm/Transforms/Utils/VNCoercion.h"
110 #include <algorithm>
111 #include <cassert>
112 #include <cstdint>
113 #include <iterator>
114 #include <map>
115 #include <memory>
116 #include <set>
117 #include <string>
118 #include <tuple>
119 #include <utility>
120 #include <vector>
121 
122 using namespace llvm;
123 using namespace llvm::GVNExpression;
124 using namespace llvm::VNCoercion;
125 using namespace llvm::PatternMatch;
126 
127 #define DEBUG_TYPE "newgvn"
128 
129 STATISTIC(NumGVNInstrDeleted, "Number of instructions deleted");
130 STATISTIC(NumGVNBlocksDeleted, "Number of blocks deleted");
131 STATISTIC(NumGVNOpsSimplified, "Number of Expressions simplified");
132 STATISTIC(NumGVNPhisAllSame, "Number of PHIs whose arguments are all the same");
133 STATISTIC(NumGVNMaxIterations,
134           "Maximum number of iterations it took for GVN to converge");
135 STATISTIC(NumGVNLeaderChanges, "Number of leader changes");
136 STATISTIC(NumGVNSortedLeaderChanges, "Number of sorted leader changes");
137 STATISTIC(NumGVNAvoidedSortedLeaderChanges,
138           "Number of avoided sorted leader changes");
139 STATISTIC(NumGVNDeadStores, "Number of redundant/dead stores eliminated");
140 STATISTIC(NumGVNPHIOfOpsCreated, "Number of PHI of ops created");
141 STATISTIC(NumGVNPHIOfOpsEliminations,
142           "Number of things eliminated using PHI of ops");
143 DEBUG_COUNTER(VNCounter, "newgvn-vn",
144               "Controls which instructions are value numbered");
145 DEBUG_COUNTER(PHIOfOpsCounter, "newgvn-phi",
146               "Controls which instructions we create phi of ops for");
147 // Currently store defining access refinement is too slow due to basicaa being
148 // egregiously slow.  This flag lets us keep it working while we work on this
149 // issue.
150 static cl::opt<bool> EnableStoreRefinement("enable-store-refinement",
151                                            cl::init(false), cl::Hidden);
152 
153 /// Currently, the generation of "phi of ops" can result in correctness issues.
154 static cl::opt<bool> EnablePhiOfOps("enable-phi-of-ops", cl::init(true),
155                                     cl::Hidden);
156 
157 //===----------------------------------------------------------------------===//
158 //                                GVN Pass
159 //===----------------------------------------------------------------------===//
160 
161 // Anchor methods.
162 namespace llvm {
163 namespace GVNExpression {
164 
165 Expression::~Expression() = default;
166 BasicExpression::~BasicExpression() = default;
167 CallExpression::~CallExpression() = default;
168 LoadExpression::~LoadExpression() = default;
169 StoreExpression::~StoreExpression() = default;
170 AggregateValueExpression::~AggregateValueExpression() = default;
171 PHIExpression::~PHIExpression() = default;
172 
173 } // end namespace GVNExpression
174 } // end namespace llvm
175 
176 namespace {
177 
178 // Tarjan's SCC finding algorithm with Nuutila's improvements
179 // SCCIterator is actually fairly complex for the simple thing we want.
180 // It also wants to hand us SCC's that are unrelated to the phi node we ask
181 // about, and have us process them there or risk redoing work.
182 // Graph traits over a filter iterator also doesn't work that well here.
183 // This SCC finder is specialized to walk use-def chains, and only follows
184 // instructions, not generic values (arguments, etc).
186 struct TarjanSCC {
187   TarjanSCC() : Components(1) {}
188 
189   void Start(const Instruction *Start) {
190     if (Root.lookup(Start) == 0)
191       FindSCC(Start);
192   }
193 
194   const SmallPtrSetImpl<const Value *> &getComponentFor(const Value *V) const {
195     unsigned ComponentID = ValueToComponent.lookup(V);
196 
197     assert(ComponentID > 0 &&
198            "Asking for a component for a value we never processed");
199     return Components[ComponentID];
200   }
201 
202 private:
203   void FindSCC(const Instruction *I) {
204     Root[I] = ++DFSNum;
205     // Store the DFS Number we had before it possibly gets incremented.
206     unsigned int OurDFS = DFSNum;
207     for (const auto &Op : I->operands()) {
208       if (auto *InstOp = dyn_cast<Instruction>(Op)) {
209         if (Root.lookup(Op) == 0)
210           FindSCC(InstOp);
211         if (!InComponent.count(Op))
212           Root[I] = std::min(Root.lookup(I), Root.lookup(Op));
213       }
214     }
215     // See if we really were the root of a component by checking if we still have
216     // our DFSNumber.  If we do, we are the root of the component, and we have
217     // completed a component. If we do not, we are not the root of a component,
218     // and belong on the component stack.
219     if (Root.lookup(I) == OurDFS) {
220       unsigned ComponentID = Components.size();
221       Components.resize(Components.size() + 1);
222       auto &Component = Components.back();
223       Component.insert(I);
224       LLVM_DEBUG(dbgs() << "Component root is " << *I << "\n");
225       InComponent.insert(I);
226       ValueToComponent[I] = ComponentID;
227       // Pop the members of this component off the stack and label them.
228       while (!Stack.empty() && Root.lookup(Stack.back()) >= OurDFS) {
229         auto *Member = Stack.back();
230         LLVM_DEBUG(dbgs() << "Component member is " << *Member << "\n");
231         Component.insert(Member);
232         InComponent.insert(Member);
233         ValueToComponent[Member] = ComponentID;
234         Stack.pop_back();
235       }
236     } else {
237       // Part of a component, push to stack
238       Stack.push_back(I);
239     }
240   }
241 
242   unsigned int DFSNum = 1;
243   SmallPtrSet<const Value *, 8> InComponent;
244   DenseMap<const Value *, unsigned int> Root;
245   SmallVector<const Value *, 8> Stack;
246 
247   // Store the components as a vector of ptr sets, because we need the topo order
248   // of SCC's, but not individual member order.
249   SmallVector<SmallPtrSet<const Value *, 8>, 8> Components;
250 
251   DenseMap<const Value *, unsigned> ValueToComponent;
252 };
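
// Illustrative example (made-up IR names): given an induction-variable style
// cycle such as
//   %iv      = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, 1
// calling Start() on the phi places %iv and %iv.next into a single component,
// which getComponentFor() then returns for either value.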
253 
254 // Congruence classes represent the set of expressions/instructions
255 // that are all the same *during some scope in the function*.
256 // That is, because of the way we perform equality propagation, and
257 // because of memory value numbering, it is not correct to assume
258 // you can willy-nilly replace any member with any other at any
259 // point in the function.
260 //
261 // For any Value in the Member set, it is valid to replace any dominated member
262 // with that Value.
263 //
264 // Every congruence class has a leader, and the leader is used to symbolize
265 // instructions in a canonical way (IE every operand of an instruction that is a
266 // member of the same congruence class will always be replaced with the leader
267 // during symbolization).  To simplify symbolization, we keep the leader as a
268 // constant if the class can be proved to be a constant value.  Otherwise, the
269 // leader is the member of the value set with the smallest DFS number.  Each
270 // congruence class also has a defining expression, though the expression may be
271 // null.  If it exists, it can be used for forward propagation and reassociation
272 // of values.
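// For example (illustrative names): if %a, %b and %c are congruent and the
// class can be proved equal to the constant 42, the leader is the constant 42;
// otherwise the leader is whichever of the three has the smallest DFS number.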
273 
274 // For memory, we also track a representative MemoryAccess, and a set of memory
275 // members for MemoryPhis (which have no real instructions). Note that for
276 // memory, it seems tempting to try to split the memory members into a
277 // MemoryCongruenceClass or something.  Unfortunately, this does not work
278 // easily.  The value numbering of a given memory expression depends on the
279 // leader of the memory congruence class, and the leader of memory congruence
280 // class depends on the value numbering of a given memory expression.  This
281 // leads to wasted propagation, and in some cases, missed optimization.  For
282 // example: If we had value numbered two stores together before, but now do not,
283 // we move them to a new value congruence class.  This in turn will move at least
284 // one of the memorydefs to a new memory congruence class, which in turn affects
285 // the value numbering of the stores we just value numbered (because the memory
286 // congruence class is part of the value number).  So while theoretically
287 // possible to split them up, it turns out to be *incredibly* complicated to get
288 // it to work right, because of the interdependency.  While structurally
289 // slightly messier, it is algorithmically much simpler and faster to do what we
290 // do here, and track them both at once in the same class.
291 // Note: The default iterators for this class iterate over values.
292 class CongruenceClass {
293 public:
294   using MemberType = Value;
295   using MemberSet = SmallPtrSet<MemberType *, 4>;
296   using MemoryMemberType = MemoryPhi;
297   using MemoryMemberSet = SmallPtrSet<const MemoryMemberType *, 2>;
298 
299   explicit CongruenceClass(unsigned ID) : ID(ID) {}
300   CongruenceClass(unsigned ID, Value *Leader, const Expression *E)
301       : ID(ID), RepLeader(Leader), DefiningExpr(E) {}
302 
303   unsigned getID() const { return ID; }
304 
305   // True if this class has no members left.  This is mainly used for assertion
306   // purposes, and for skipping empty classes.
307   bool isDead() const {
308     // If it's both dead from a value perspective, and dead from a memory
309     // perspective, it's really dead.
310     return empty() && memory_empty();
311   }
312 
313   // Leader functions
314   Value *getLeader() const { return RepLeader; }
315   void setLeader(Value *Leader) { RepLeader = Leader; }
316   const std::pair<Value *, unsigned int> &getNextLeader() const {
317     return NextLeader;
318   }
319   void resetNextLeader() { NextLeader = {nullptr, ~0}; }
320   void addPossibleNextLeader(std::pair<Value *, unsigned int> LeaderPair) {
321     if (LeaderPair.second < NextLeader.second)
322       NextLeader = LeaderPair;
323   }
324 
325   Value *getStoredValue() const { return RepStoredValue; }
326   void setStoredValue(Value *Leader) { RepStoredValue = Leader; }
327   const MemoryAccess *getMemoryLeader() const { return RepMemoryAccess; }
328   void setMemoryLeader(const MemoryAccess *Leader) { RepMemoryAccess = Leader; }
329 
330   // Forward propagation info
331   const Expression *getDefiningExpr() const { return DefiningExpr; }
332 
333   // Value member set
334   bool empty() const { return Members.empty(); }
335   unsigned size() const { return Members.size(); }
336   MemberSet::const_iterator begin() const { return Members.begin(); }
337   MemberSet::const_iterator end() const { return Members.end(); }
338   void insert(MemberType *M) { Members.insert(M); }
339   void erase(MemberType *M) { Members.erase(M); }
340   void swap(MemberSet &Other) { Members.swap(Other); }
341 
342   // Memory member set
343   bool memory_empty() const { return MemoryMembers.empty(); }
344   unsigned memory_size() const { return MemoryMembers.size(); }
345   MemoryMemberSet::const_iterator memory_begin() const {
346     return MemoryMembers.begin();
347   }
348   MemoryMemberSet::const_iterator memory_end() const {
349     return MemoryMembers.end();
350   }
351   iterator_range<MemoryMemberSet::const_iterator> memory() const {
352     return make_range(memory_begin(), memory_end());
353   }
354 
355   void memory_insert(const MemoryMemberType *M) { MemoryMembers.insert(M); }
356   void memory_erase(const MemoryMemberType *M) { MemoryMembers.erase(M); }
357 
358   // Store count
359   unsigned getStoreCount() const { return StoreCount; }
360   void incStoreCount() { ++StoreCount; }
361   void decStoreCount() {
362     assert(StoreCount != 0 && "Store count went negative");
363     --StoreCount;
364   }
365 
366   // True if this class has no memory members.
367   bool definesNoMemory() const { return StoreCount == 0 && memory_empty(); }
368 
369   // Return true if two congruence classes are equivalent to each other. This
370   // means that every field but the ID number and the dead field is equivalent.
371   bool isEquivalentTo(const CongruenceClass *Other) const {
372     if (!Other)
373       return false;
374     if (this == Other)
375       return true;
376 
377     if (std::tie(StoreCount, RepLeader, RepStoredValue, RepMemoryAccess) !=
378         std::tie(Other->StoreCount, Other->RepLeader, Other->RepStoredValue,
379                  Other->RepMemoryAccess))
380       return false;
381     if (DefiningExpr != Other->DefiningExpr)
382       if (!DefiningExpr || !Other->DefiningExpr ||
383           *DefiningExpr != *Other->DefiningExpr)
384         return false;
385 
386     if (Members.size() != Other->Members.size())
387       return false;
388 
389     return llvm::set_is_subset(Members, Other->Members);
390   }
391 
392 private:
393   unsigned ID;
394 
395   // Representative leader.
396   Value *RepLeader = nullptr;
397 
398   // The most dominating leader after our current leader.  We track this because
399   // the member set is not sorted and is expensive to keep sorted all the time.
400   std::pair<Value *, unsigned int> NextLeader = {nullptr, ~0U};
401 
402   // If this is represented by a store, the value of the store.
403   Value *RepStoredValue = nullptr;
404 
405   // If this class contains MemoryDefs or MemoryPhis, this is the leading memory
406   // access.
407   const MemoryAccess *RepMemoryAccess = nullptr;
408 
409   // Defining Expression.
410   const Expression *DefiningExpr = nullptr;
411 
412   // Actual members of this class.
413   MemberSet Members;
414 
415   // This is the set of MemoryPhis that exist in the class. MemoryDefs and
416   // MemoryUses have real instructions representing them, so we only need to
417   // track MemoryPhis here.
418   MemoryMemberSet MemoryMembers;
419 
420   // Number of stores in this congruence class.
421   // This is used so we can detect store equivalence changes properly.
422   int StoreCount = 0;
423 };
424 
425 } // end anonymous namespace
426 
427 namespace llvm {
428 
429 struct ExactEqualsExpression {
430   const Expression &E;
431 
432   explicit ExactEqualsExpression(const Expression &E) : E(E) {}
433 
434   hash_code getComputedHash() const { return E.getComputedHash(); }
435 
436   bool operator==(const Expression &Other) const {
437     return E.exactlyEquals(Other);
438   }
439 };
440 
441 template <> struct DenseMapInfo<const Expression *> {
442   static const Expression *getEmptyKey() {
443     auto Val = static_cast<uintptr_t>(-1);
444     Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable;
445     return reinterpret_cast<const Expression *>(Val);
446   }
447 
448   static const Expression *getTombstoneKey() {
449     auto Val = static_cast<uintptr_t>(~1U);
450     Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable;
451     return reinterpret_cast<const Expression *>(Val);
452   }
453 
454   static unsigned getHashValue(const Expression *E) {
455     return E->getComputedHash();
456   }
457 
458   static unsigned getHashValue(const ExactEqualsExpression &E) {
459     return E.getComputedHash();
460   }
461 
462   static bool isEqual(const ExactEqualsExpression &LHS, const Expression *RHS) {
463     if (RHS == getTombstoneKey() || RHS == getEmptyKey())
464       return false;
465     return LHS == *RHS;
466   }
467 
468   static bool isEqual(const Expression *LHS, const Expression *RHS) {
469     if (LHS == RHS)
470       return true;
471     if (LHS == getTombstoneKey() || RHS == getTombstoneKey() ||
472         LHS == getEmptyKey() || RHS == getEmptyKey())
473       return false;
474     // Compare hashes before equality.  This is *not* what the hashtable does,
475     // since it is computing it modulo the number of buckets, whereas we are
476     // using the full hash keyspace.  Since the hashes are precomputed, this
477     // check is *much* faster than equality.
478     if (LHS->getComputedHash() != RHS->getComputedHash())
479       return false;
480     return *LHS == *RHS;
481   }
482 };
483 
484 } // end namespace llvm
485 
486 namespace {
487 
488 class NewGVN {
489   Function &F;
490   DominatorTree *DT = nullptr;
491   const TargetLibraryInfo *TLI = nullptr;
492   AliasAnalysis *AA = nullptr;
493   MemorySSA *MSSA = nullptr;
494   MemorySSAWalker *MSSAWalker = nullptr;
495   AssumptionCache *AC = nullptr;
496   const DataLayout &DL;
497   std::unique_ptr<PredicateInfo> PredInfo;
498 
499   // These are the only two members the create* functions should have
500   // side-effects on, due to allocating memory from them.
501   mutable BumpPtrAllocator ExpressionAllocator;
502   mutable ArrayRecycler<Value *> ArgRecycler;
503   mutable TarjanSCC SCCFinder;
504   const SimplifyQuery SQ;
505 
506   // Number of function arguments, used by ranking
507   unsigned int NumFuncArgs = 0;
508 
509   // RPOOrdering of basic blocks
510   DenseMap<const DomTreeNode *, unsigned> RPOOrdering;
511 
512   // Congruence class info.
513 
514   // This class is called INITIAL in the paper. It is the class everything
515 // starts out in, and represents any value. Being an optimistic analysis,
516   // anything in the TOP class has the value TOP, which is indeterminate and
517   // equivalent to everything.
518   CongruenceClass *TOPClass = nullptr;
519   std::vector<CongruenceClass *> CongruenceClasses;
520   unsigned NextCongruenceNum = 0;
521 
522   // Value Mappings.
523   DenseMap<Value *, CongruenceClass *> ValueToClass;
524   DenseMap<Value *, const Expression *> ValueToExpression;
525 
526   // Value PHI handling, used to make equivalence between phi(op, op) and
527   // op(phi, phi).
528   // These mappings just store various data that would normally be part of the
529   // IR.
530   SmallPtrSet<const Instruction *, 8> PHINodeUses;
531 
532   DenseMap<const Value *, bool> OpSafeForPHIOfOps;
533 
534   // Map a temporary instruction we created to a parent block.
535   DenseMap<const Value *, BasicBlock *> TempToBlock;
536 
537   // Map between the already in-program instructions and the temporary phis we
538   // created that they are known equivalent to.
539   DenseMap<const Value *, PHINode *> RealToTemp;
540 
541   // In order to know when we should re-process instructions that have
542   // phi-of-ops, we track the set of expressions that they needed as
543   // leaders. When we discover new leaders for those expressions, we process the
544   // associated phi-of-op instructions again in case they have changed.  The
545   // other way they may change is if they had leaders, and those leaders
546   // disappear.  However, at the point they have leaders, there are uses of the
547   // relevant operands in the created phi node, and so they will get reprocessed
548   // through the normal user marking we perform.
549   mutable DenseMap<const Value *, SmallPtrSet<Value *, 2>> AdditionalUsers;
550   DenseMap<const Expression *, SmallPtrSet<Instruction *, 2>>
551       ExpressionToPhiOfOps;
552 
553   // Map from temporary operation to MemoryAccess.
554   DenseMap<const Instruction *, MemoryUseOrDef *> TempToMemory;
555 
556   // Set of all temporary instructions we created.
557   // Note: This will include instructions that were just created during value
558   // numbering.  The way to test if something is using them is to check
559   // RealToTemp.
560   DenseSet<Instruction *> AllTempInstructions;
561 
562   // This is the set of instructions to revisit on a reachability change.  At
563   // the end of the main iteration loop it will contain at least all the phi of
564   // ops instructions that will be changed to phis, as well as regular phis.
565   // During the iteration loop, it may contain other things, such as phi of ops
566   // instructions that used edge reachability to reach a result, and so need to
567   // be revisited when the edge changes, independent of whether the phi they
568   // depended on changes.
569   DenseMap<BasicBlock *, SparseBitVector<>> RevisitOnReachabilityChange;
570 
571   // Mapping from predicate info we used to the instructions we used it with.
572   // In order to correctly ensure propagation, we must keep track of what
573   // comparisons we used, so that when the values of the comparisons change, we
574   // propagate the information to the places we used the comparison.
575   mutable DenseMap<const Value *, SmallPtrSet<Instruction *, 2>>
576       PredicateToUsers;
577 
578   // For the same reasoning as PredicateToUsers: when we skip MemoryAccesses for
579   // stores, we can no longer rely solely on the def-use chains of MemorySSA.
580   mutable DenseMap<const MemoryAccess *, SmallPtrSet<MemoryAccess *, 2>>
581       MemoryToUsers;
582 
583   // A table storing which memorydefs/phis represent a memory state provably
584   // equivalent to another memory state.
585   // We could use the congruence class machinery, but the MemoryAccess's are
586   // abstract memory states, so they can only ever be equivalent to each other,
587   // and not to constants, etc.
588   DenseMap<const MemoryAccess *, CongruenceClass *> MemoryAccessToClass;
589 
590   // We could, if we wanted, build MemoryPhiExpressions and
591   // MemoryVariableExpressions, etc, and value number them the same way we value
592   // number phi expressions.  For the moment, this seems like overkill.  They
593   // can only exist in one of three states: they can be TOP (equal to
594   // everything), Equivalent to something else, or unique.  Because we do not
595   // create expressions for them, we need to simulate leader change not just
596   // when they change class, but when they change state.  Note: We can do the
597 // same thing for phis, and avoid having phi expressions if we wanted.  We
598 // should eventually unify in one direction or the other, so this is a little
599 // bit of an experiment to see which turns out easier to maintain.
600   enum MemoryPhiState { MPS_Invalid, MPS_TOP, MPS_Equivalent, MPS_Unique };
601   DenseMap<const MemoryPhi *, MemoryPhiState> MemoryPhiState;
602 
603   enum InstCycleState { ICS_Unknown, ICS_CycleFree, ICS_Cycle };
604   mutable DenseMap<const Instruction *, InstCycleState> InstCycleState;
605 
606   // Expression to class mapping.
607   using ExpressionClassMap = DenseMap<const Expression *, CongruenceClass *>;
608   ExpressionClassMap ExpressionToClass;
609 
610 // We have a single DeadExpression that represents currently dead expressions.
611 // Dead expressions that we can prove will stay dead are marked with
612   // DFS number zero.  However, it's possible in the case of phi nodes
613   // for us to assume/prove all arguments are dead during fixpointing.
614   // We use DeadExpression for that case.
615   DeadExpression *SingletonDeadExpression = nullptr;
616 
617   // Which values have changed as a result of leader changes.
618   SmallPtrSet<Value *, 8> LeaderChanges;
619 
620   // Reachability info.
621   using BlockEdge = BasicBlockEdge;
622   DenseSet<BlockEdge> ReachableEdges;
623   SmallPtrSet<const BasicBlock *, 8> ReachableBlocks;
624 
625   // This is a bitvector because, on larger functions, we may have
626   // thousands of touched instructions at once (entire blocks,
627   // instructions with hundreds of uses, etc).  Even with optimization
628   // for when we mark whole blocks as touched, when this was a
629   // SmallPtrSet or DenseSet, for some functions, we spent >20% of all
630   // the time in GVN just managing this list.  The bitvector, on the
631   // other hand, efficiently supports test/set/clear of both
632 // individual elements and ranges, as well as "find next element".  This
633   // enables us to use it as a worklist with essentially 0 cost.
634   BitVector TouchedInstructions;
635 
636   DenseMap<const BasicBlock *, std::pair<unsigned, unsigned>> BlockInstRange;
637   mutable DenseMap<const IntrinsicInst *, const Value *> IntrinsicInstPred;
638 
639 #ifndef NDEBUG
640   // Debugging for how many times each block and instruction got processed.
641   DenseMap<const Value *, unsigned> ProcessedCount;
642 #endif
643 
644   // DFS info.
645   // This contains a mapping from Instructions to DFS numbers.
646   // The numbering starts at 1. An instruction with DFS number zero
647   // means that the instruction is dead.
648   DenseMap<const Value *, unsigned> InstrDFS;
649 
650 // This contains the mapping from DFS numbers to instructions.
651   SmallVector<Value *, 32> DFSToInstr;
652 
653   // Deletion info.
654   SmallPtrSet<Instruction *, 8> InstructionsToErase;
655 
656 public:
657   NewGVN(Function &F, DominatorTree *DT, AssumptionCache *AC,
658          TargetLibraryInfo *TLI, AliasAnalysis *AA, MemorySSA *MSSA,
659          const DataLayout &DL)
660       : F(F), DT(DT), TLI(TLI), AA(AA), MSSA(MSSA), AC(AC), DL(DL),
661         PredInfo(std::make_unique<PredicateInfo>(F, *DT, *AC)),
662         SQ(DL, TLI, DT, AC, /*CtxI=*/nullptr, /*UseInstrInfo=*/false,
663            /*CanUseUndef=*/false) {}
664 
665   bool runGVN();
666 
667 private:
668   /// Helper struct that returns an Expression with an optional extra dependency.
669   struct ExprResult {
670     const Expression *Expr;
671     Value *ExtraDep;
672     const PredicateBase *PredDep;
673 
674     ExprResult(const Expression *Expr, Value *ExtraDep = nullptr,
675                const PredicateBase *PredDep = nullptr)
676         : Expr(Expr), ExtraDep(ExtraDep), PredDep(PredDep) {}
677     ExprResult(const ExprResult &) = delete;
678     ExprResult(ExprResult &&Other)
679         : Expr(Other.Expr), ExtraDep(Other.ExtraDep), PredDep(Other.PredDep) {
680       Other.Expr = nullptr;
681       Other.ExtraDep = nullptr;
682       Other.PredDep = nullptr;
683     }
684     ExprResult &operator=(const ExprResult &Other) = delete;
685     ExprResult &operator=(ExprResult &&Other) = delete;
686 
687     ~ExprResult() { assert(!ExtraDep && "unhandled ExtraDep"); }
688 
689     operator bool() const { return Expr; }
690 
691     static ExprResult none() { return {nullptr, nullptr, nullptr}; }
692     static ExprResult some(const Expression *Expr, Value *ExtraDep = nullptr) {
693       return {Expr, ExtraDep, nullptr};
694     }
695     static ExprResult some(const Expression *Expr,
696                            const PredicateBase *PredDep) {
697       return {Expr, nullptr, PredDep};
698     }
699     static ExprResult some(const Expression *Expr, Value *ExtraDep,
700                            const PredicateBase *PredDep) {
701       return {Expr, ExtraDep, PredDep};
702     }
703   };
704 
705   // Expression handling.
706   ExprResult createExpression(Instruction *) const;
707   const Expression *createBinaryExpression(unsigned, Type *, Value *, Value *,
708                                            Instruction *) const;
709 
710   // Our canonical form for phi arguments is a pair of incoming value, incoming
711   // basic block.
712   using ValPair = std::pair<Value *, BasicBlock *>;
713 
714   PHIExpression *createPHIExpression(ArrayRef<ValPair>, const Instruction *,
715                                      BasicBlock *, bool &HasBackEdge,
716                                      bool &OriginalOpsConstant) const;
717   const DeadExpression *createDeadExpression() const;
718   const VariableExpression *createVariableExpression(Value *) const;
719   const ConstantExpression *createConstantExpression(Constant *) const;
720   const Expression *createVariableOrConstant(Value *V) const;
721   const UnknownExpression *createUnknownExpression(Instruction *) const;
722   const StoreExpression *createStoreExpression(StoreInst *,
723                                                const MemoryAccess *) const;
724   LoadExpression *createLoadExpression(Type *, Value *, LoadInst *,
725                                        const MemoryAccess *) const;
726   const CallExpression *createCallExpression(CallInst *,
727                                              const MemoryAccess *) const;
728   const AggregateValueExpression *
729   createAggregateValueExpression(Instruction *) const;
730   bool setBasicExpressionInfo(Instruction *, BasicExpression *) const;
731 
732   // Congruence class handling.
733   CongruenceClass *createCongruenceClass(Value *Leader, const Expression *E) {
734     auto *result = new CongruenceClass(NextCongruenceNum++, Leader, E);
735     CongruenceClasses.emplace_back(result);
736     return result;
737   }
738 
739   CongruenceClass *createMemoryClass(MemoryAccess *MA) {
740     auto *CC = createCongruenceClass(nullptr, nullptr);
741     CC->setMemoryLeader(MA);
742     return CC;
743   }
744 
745   CongruenceClass *ensureLeaderOfMemoryClass(MemoryAccess *MA) {
746     auto *CC = getMemoryClass(MA);
747     if (CC->getMemoryLeader() != MA)
748       CC = createMemoryClass(MA);
749     return CC;
750   }
751 
752   CongruenceClass *createSingletonCongruenceClass(Value *Member) {
753     CongruenceClass *CClass = createCongruenceClass(Member, nullptr);
754     CClass->insert(Member);
755     ValueToClass[Member] = CClass;
756     return CClass;
757   }
758 
759   void initializeCongruenceClasses(Function &F);
760   const Expression *makePossiblePHIOfOps(Instruction *,
761                                          SmallPtrSetImpl<Value *> &);
762   Value *findLeaderForInst(Instruction *ValueOp,
763                            SmallPtrSetImpl<Value *> &Visited,
764                            MemoryAccess *MemAccess, Instruction *OrigInst,
765                            BasicBlock *PredBB);
766   bool OpIsSafeForPHIOfOps(Value *Op, const BasicBlock *PHIBlock,
767                            SmallPtrSetImpl<const Value *> &);
768   void addPhiOfOps(PHINode *Op, BasicBlock *BB, Instruction *ExistingValue);
769   void removePhiOfOps(Instruction *I, PHINode *PHITemp);
770 
771   // Value number an Instruction or MemoryPhi.
772   void valueNumberMemoryPhi(MemoryPhi *);
773   void valueNumberInstruction(Instruction *);
774 
775   // Symbolic evaluation.
776   ExprResult checkExprResults(Expression *, Instruction *, Value *) const;
777   ExprResult performSymbolicEvaluation(Value *,
778                                        SmallPtrSetImpl<Value *> &) const;
779   const Expression *performSymbolicLoadCoercion(Type *, Value *, LoadInst *,
780                                                 Instruction *,
781                                                 MemoryAccess *) const;
782   const Expression *performSymbolicLoadEvaluation(Instruction *) const;
783   const Expression *performSymbolicStoreEvaluation(Instruction *) const;
784   ExprResult performSymbolicCallEvaluation(Instruction *) const;
785   void sortPHIOps(MutableArrayRef<ValPair> Ops) const;
786   const Expression *performSymbolicPHIEvaluation(ArrayRef<ValPair>,
787                                                  Instruction *I,
788                                                  BasicBlock *PHIBlock) const;
789   const Expression *performSymbolicAggrValueEvaluation(Instruction *) const;
790   ExprResult performSymbolicCmpEvaluation(Instruction *) const;
791   ExprResult performSymbolicPredicateInfoEvaluation(IntrinsicInst *) const;
792 
793   // Congruence finding.
794   bool someEquivalentDominates(const Instruction *, const Instruction *) const;
795   Value *lookupOperandLeader(Value *) const;
796   CongruenceClass *getClassForExpression(const Expression *E) const;
797   void performCongruenceFinding(Instruction *, const Expression *);
798   void moveValueToNewCongruenceClass(Instruction *, const Expression *,
799                                      CongruenceClass *, CongruenceClass *);
800   void moveMemoryToNewCongruenceClass(Instruction *, MemoryAccess *,
801                                       CongruenceClass *, CongruenceClass *);
802   Value *getNextValueLeader(CongruenceClass *) const;
803   const MemoryAccess *getNextMemoryLeader(CongruenceClass *) const;
804   bool setMemoryClass(const MemoryAccess *From, CongruenceClass *To);
805   CongruenceClass *getMemoryClass(const MemoryAccess *MA) const;
806   const MemoryAccess *lookupMemoryLeader(const MemoryAccess *) const;
807   bool isMemoryAccessTOP(const MemoryAccess *) const;
808 
809   // Ranking
810   unsigned int getRank(const Value *) const;
811   bool shouldSwapOperands(const Value *, const Value *) const;
812   bool shouldSwapOperandsForIntrinsic(const Value *, const Value *,
813                                       const IntrinsicInst *I) const;
814 
815   // Reachability handling.
816   void updateReachableEdge(BasicBlock *, BasicBlock *);
817   void processOutgoingEdges(Instruction *, BasicBlock *);
818   Value *findConditionEquivalence(Value *) const;
819 
820   // Elimination.
821   struct ValueDFS;
822   void convertClassToDFSOrdered(const CongruenceClass &,
823                                 SmallVectorImpl<ValueDFS> &,
824                                 DenseMap<const Value *, unsigned int> &,
825                                 SmallPtrSetImpl<Instruction *> &) const;
826   void convertClassToLoadsAndStores(const CongruenceClass &,
827                                     SmallVectorImpl<ValueDFS> &) const;
828 
829   bool eliminateInstructions(Function &);
830   void replaceInstruction(Instruction *, Value *);
831   void markInstructionForDeletion(Instruction *);
832   void deleteInstructionsInBlock(BasicBlock *);
833   Value *findPHIOfOpsLeader(const Expression *, const Instruction *,
834                             const BasicBlock *) const;
835 
836   // Various instruction touch utilities
837   template <typename Map, typename KeyType>
838   void touchAndErase(Map &, const KeyType &);
839   void markUsersTouched(Value *);
840   void markMemoryUsersTouched(const MemoryAccess *);
841   void markMemoryDefTouched(const MemoryAccess *);
842   void markPredicateUsersTouched(Instruction *);
843   void markValueLeaderChangeTouched(CongruenceClass *CC);
844   void markMemoryLeaderChangeTouched(CongruenceClass *CC);
845   void markPhiOfOpsChanged(const Expression *E);
846   void addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const;
847   void addAdditionalUsers(Value *To, Value *User) const;
848   void addAdditionalUsers(ExprResult &Res, Instruction *User) const;
849 
850   // Main loop of value numbering
851   void iterateTouchedInstructions();
852 
853   // Utilities.
854   void cleanupTables();
855   std::pair<unsigned, unsigned> assignDFSNumbers(BasicBlock *, unsigned);
856   void updateProcessedCount(const Value *V);
857   void verifyMemoryCongruency() const;
858   void verifyIterationSettled(Function &F);
859   void verifyStoreExpressions() const;
860   bool singleReachablePHIPath(SmallPtrSet<const MemoryAccess *, 8> &,
861                               const MemoryAccess *, const MemoryAccess *) const;
862   BasicBlock *getBlockForValue(Value *V) const;
863   void deleteExpression(const Expression *E) const;
864   MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
865   MemoryPhi *getMemoryAccess(const BasicBlock *) const;
866   template <class T, class Range> T *getMinDFSOfRange(const Range &) const;
867 
868   unsigned InstrToDFSNum(const Value *V) const {
869     assert(isa<Instruction>(V) && "This should not be used for MemoryAccesses");
870     return InstrDFS.lookup(V);
871   }
872 
873   unsigned InstrToDFSNum(const MemoryAccess *MA) const {
874     return MemoryToDFSNum(MA);
875   }
876 
877   Value *InstrFromDFSNum(unsigned DFSNum) { return DFSToInstr[DFSNum]; }
878 
879   // Given a MemoryAccess, return the relevant instruction DFS number.  Note:
880   // This deliberately takes a value so it can be used with Use's, which will
881   // auto-convert to Value's but not to MemoryAccess's.
882   unsigned MemoryToDFSNum(const Value *MA) const {
883     assert(isa<MemoryAccess>(MA) &&
884            "This should not be used with instructions");
885     return isa<MemoryUseOrDef>(MA)
886                ? InstrToDFSNum(cast<MemoryUseOrDef>(MA)->getMemoryInst())
887                : InstrDFS.lookup(MA);
888   }
889 
890   bool isCycleFree(const Instruction *) const;
891   bool isBackedge(BasicBlock *From, BasicBlock *To) const;
892 
893   // Debug counter info.  When verifying, we have to reset the value numbering
894   // debug counter to the same state it started in to get the same results.
895   int64_t StartingVNCounter = 0;
896 };
897 
898 } // end anonymous namespace
899 
900 template <typename T>
901 static bool equalsLoadStoreHelper(const T &LHS, const Expression &RHS) {
902   if (!isa<LoadExpression>(RHS) && !isa<StoreExpression>(RHS))
903     return false;
904   return LHS.MemoryExpression::equals(RHS);
905 }
906 
907 bool LoadExpression::equals(const Expression &Other) const {
908   return equalsLoadStoreHelper(*this, Other);
909 }
910 
911 bool StoreExpression::equals(const Expression &Other) const {
912   if (!equalsLoadStoreHelper(*this, Other))
913     return false;
914   // Make sure that store vs store includes the value operand.
915   if (const auto *S = dyn_cast<StoreExpression>(&Other))
916     if (getStoredValue() != S->getStoredValue())
917       return false;
918   return true;
919 }
920 
921 // Determine if the edge From->To is a backedge
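// For example, in a natural loop the latch->header edge is a backedge here,
// because the loop header precedes the latch in the RPO numbering.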
922 bool NewGVN::isBackedge(BasicBlock *From, BasicBlock *To) const {
923   return From == To ||
924          RPOOrdering.lookup(DT->getNode(From)) >=
925              RPOOrdering.lookup(DT->getNode(To));
926 }
927 
928 #ifndef NDEBUG
929 static std::string getBlockName(const BasicBlock *B) {
930   return DOTGraphTraits<DOTFuncInfo *>::getSimpleNodeLabel(B, nullptr);
931 }
932 #endif
933 
934 // Get a MemoryAccess for an instruction, fake or real.
935 MemoryUseOrDef *NewGVN::getMemoryAccess(const Instruction *I) const {
936   auto *Result = MSSA->getMemoryAccess(I);
937   return Result ? Result : TempToMemory.lookup(I);
938 }
939 
940 // Get a MemoryPhi for a basic block. These are all real.
941 MemoryPhi *NewGVN::getMemoryAccess(const BasicBlock *BB) const {
942   return MSSA->getMemoryAccess(BB);
943 }
944 
945 // Get the basic block from an instruction/memory value.
946 BasicBlock *NewGVN::getBlockForValue(Value *V) const {
947   if (auto *I = dyn_cast<Instruction>(V)) {
948     auto *Parent = I->getParent();
949     if (Parent)
950       return Parent;
951     Parent = TempToBlock.lookup(V);
952     assert(Parent && "Every fake instruction should have a block");
953     return Parent;
954   }
955 
956   auto *MP = dyn_cast<MemoryPhi>(V);
957   assert(MP && "Should have been an instruction or a MemoryPhi");
958   return MP->getBlock();
959 }
960 
961 // Delete a definitely dead expression, so it can be reused by the expression
962 // allocator.  Some of these are not in creation functions, so we have to accept
963 // const versions.
964 void NewGVN::deleteExpression(const Expression *E) const {
965   assert(isa<BasicExpression>(E));
966   auto *BE = cast<BasicExpression>(E);
967   const_cast<BasicExpression *>(BE)->deallocateOperands(ArgRecycler);
968   ExpressionAllocator.Deallocate(E);
969 }
970 
971 // If V is a predicateinfo copy, get the thing it is a copy of.
972 static Value *getCopyOf(const Value *V) {
973   if (auto *II = dyn_cast<IntrinsicInst>(V))
974     if (II->getIntrinsicID() == Intrinsic::ssa_copy)
975       return II->getOperand(0);
976   return nullptr;
977 }
978 
979 // Return true if V is really PN, even accounting for predicateinfo copies.
980 static bool isCopyOfPHI(const Value *V, const PHINode *PN) {
981   return V == PN || getCopyOf(V) == PN;
982 }
983 
984 static bool isCopyOfAPHI(const Value *V) {
985   auto *CO = getCopyOf(V);
986   return CO && isa<PHINode>(CO);
987 }
988 
989 // Sort PHI Operands into a canonical order.  What we use here is an RPO
990 // order. The BlockInstRange numbers are generated in an RPO walk of the basic
991 // blocks.
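// For example (illustrative block names), for a phi in a loop header the
// incoming value from %entry sorts before the one from the loop latch, because
// %entry is visited first in the RPO walk and so has a smaller range start.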
992 void NewGVN::sortPHIOps(MutableArrayRef<ValPair> Ops) const {
993   llvm::sort(Ops, [&](const ValPair &P1, const ValPair &P2) {
994     return BlockInstRange.lookup(P1.second).first <
995            BlockInstRange.lookup(P2.second).first;
996   });
997 }
998 
999 // Return true if V is a value that will always be available (IE can
1000 // be placed anywhere) in the function.  We don't do globals here
1001 // because they are often worse to put in place.
1002 static bool alwaysAvailable(Value *V) {
1003   return isa<Constant>(V) || isa<Argument>(V);
1004 }
1005 
1006 // Create a PHIExpression from an array of {incoming value, incoming block}
1007 // pairs.  I is the original instruction we are creating a PHIExpression for
1008 // (but may not be a phi node). We require, as an invariant, that all the
1009 // PHIOperands in the same block are sorted the same way. sortPHIOps will sort
1010 // them into a canonical order.
1011 PHIExpression *NewGVN::createPHIExpression(ArrayRef<ValPair> PHIOperands,
1012                                            const Instruction *I,
1013                                            BasicBlock *PHIBlock,
1014                                            bool &HasBackedge,
1015                                            bool &OriginalOpsConstant) const {
1016   unsigned NumOps = PHIOperands.size();
1017   auto *E = new (ExpressionAllocator) PHIExpression(NumOps, PHIBlock);
1018 
1019   E->allocateOperands(ArgRecycler, ExpressionAllocator);
1020   E->setType(PHIOperands.begin()->first->getType());
1021   E->setOpcode(Instruction::PHI);
1022 
1023   // Filter out unreachable phi operands.
1024   auto Filtered = make_filter_range(PHIOperands, [&](const ValPair &P) {
1025     auto *BB = P.second;
1026     if (auto *PHIOp = dyn_cast<PHINode>(I))
1027       if (isCopyOfPHI(P.first, PHIOp))
1028         return false;
1029     if (!ReachableEdges.count({BB, PHIBlock}))
1030       return false;
1031     // Things in TOPClass are equivalent to everything.
1032     if (ValueToClass.lookup(P.first) == TOPClass)
1033       return false;
1034     OriginalOpsConstant = OriginalOpsConstant && isa<Constant>(P.first);
1035     HasBackedge = HasBackedge || isBackedge(BB, PHIBlock);
1036     return lookupOperandLeader(P.first) != I;
1037   });
1038   std::transform(Filtered.begin(), Filtered.end(), op_inserter(E),
1039                  [&](const ValPair &P) -> Value * {
1040                    return lookupOperandLeader(P.first);
1041                  });
1042   return E;
1043 }
1044 
1045 // Set basic expression info (Arguments, type, opcode) for Expression
1046 // E from Instruction I.
1047 bool NewGVN::setBasicExpressionInfo(Instruction *I, BasicExpression *E) const {
1048   bool AllConstant = true;
1049   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1050     E->setType(GEP->getSourceElementType());
1051   else
1052     E->setType(I->getType());
1053   E->setOpcode(I->getOpcode());
1054   E->allocateOperands(ArgRecycler, ExpressionAllocator);
1055 
1056   // Transform the operand array into an operand leader array, and keep track of
1057   // whether all members are constant.
1058   std::transform(I->op_begin(), I->op_end(), op_inserter(E), [&](Value *O) {
1059     auto Operand = lookupOperandLeader(O);
1060     AllConstant = AllConstant && isa<Constant>(Operand);
1061     return Operand;
1062   });
1063 
1064   return AllConstant;
1065 }
1066 
1067 const Expression *NewGVN::createBinaryExpression(unsigned Opcode, Type *T,
1068                                                  Value *Arg1, Value *Arg2,
1069                                                  Instruction *I) const {
1070   auto *E = new (ExpressionAllocator) BasicExpression(2);
1071   // TODO: we need to remove context instruction after Value Tracking
1072   // can run without context instruction
1073   const SimplifyQuery Q = SQ.getWithInstruction(I);
1074 
1075   E->setType(T);
1076   E->setOpcode(Opcode);
1077   E->allocateOperands(ArgRecycler, ExpressionAllocator);
1078   if (Instruction::isCommutative(Opcode)) {
1079     // Ensure that commutative instructions that only differ by a permutation
1080     // of their operands get the same value number by sorting the operand value
1081     // numbers.  Since all commutative instructions have two operands it is more
1082     // efficient to sort by hand rather than using, say, std::sort.
1083     if (shouldSwapOperands(Arg1, Arg2))
1084       std::swap(Arg1, Arg2);
1085   }
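  // With this canonicalization, e.g. "add i32 %x, %y" and "add i32 %y, %x"
  // (illustrative names) produce identical expressions and therefore end up
  // in the same congruence class.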
1086   E->op_push_back(lookupOperandLeader(Arg1));
1087   E->op_push_back(lookupOperandLeader(Arg2));
1088 
1089   Value *V = simplifyBinOp(Opcode, E->getOperand(0), E->getOperand(1), Q);
1090   if (auto Simplified = checkExprResults(E, I, V)) {
1091     addAdditionalUsers(Simplified, I);
1092     return Simplified.Expr;
1093   }
1094   return E;
1095 }
1096 
1097 // Take a Value returned by simplification of Expression E/Instruction
1098 // I, and see if it resulted in a simpler expression. If so, return
1099 // that expression.
1100 NewGVN::ExprResult NewGVN::checkExprResults(Expression *E, Instruction *I,
1101                                             Value *V) const {
1102   if (!V)
1103     return ExprResult::none();
1104 
1105   if (auto *C = dyn_cast<Constant>(V)) {
1106     if (I)
1107       LLVM_DEBUG(dbgs() << "Simplified " << *I << " to constant " << *C
1108                         << "\n");
1109     NumGVNOpsSimplified++;
1110     assert(isa<BasicExpression>(E) &&
1111            "We should always have had a basic expression here");
1112     deleteExpression(E);
1113     return ExprResult::some(createConstantExpression(C));
1114   } else if (isa<Argument>(V) || isa<GlobalVariable>(V)) {
1115     if (I)
1116       LLVM_DEBUG(dbgs() << "Simplified " << *I << " to variable " << *V
1117                         << "\n");
1118     deleteExpression(E);
1119     return ExprResult::some(createVariableExpression(V));
1120   }
1121 
1122   CongruenceClass *CC = ValueToClass.lookup(V);
1123   if (CC) {
1124     if (CC->getLeader() && CC->getLeader() != I) {
1125       return ExprResult::some(createVariableOrConstant(CC->getLeader()), V);
1126     }
1127     if (CC->getDefiningExpr()) {
1128       if (I)
1129         LLVM_DEBUG(dbgs() << "Simplified " << *I << " to expression "
1130                           << *CC->getDefiningExpr() << "\n");
1131       NumGVNOpsSimplified++;
1132       deleteExpression(E);
1133       return ExprResult::some(CC->getDefiningExpr(), V);
1134     }
1135   }
1136 
1137   return ExprResult::none();
1138 }
1139 
1140 // Create a value expression from the instruction I, replacing operands with
1141 // their leaders.
1142 
1143 NewGVN::ExprResult NewGVN::createExpression(Instruction *I) const {
1144   auto *E = new (ExpressionAllocator) BasicExpression(I->getNumOperands());
1145   // TODO: we need to remove context instruction after Value Tracking
1146   // can run without context instruction
1147   const SimplifyQuery Q = SQ.getWithInstruction(I);
1148 
1149   bool AllConstant = setBasicExpressionInfo(I, E);
1150 
1151   if (I->isCommutative()) {
1152     // Ensure that commutative instructions that only differ by a permutation
1153     // of their operands get the same value number by sorting the operand value
1154     // numbers.  Since all commutative instructions have two operands it is more
1155     // efficient to sort by hand rather than using, say, std::sort.
1156     assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
1157     if (shouldSwapOperands(E->getOperand(0), E->getOperand(1)))
1158       E->swapOperands(0, 1);
1159   }
1160   // Perform simplification.
1161   if (auto *CI = dyn_cast<CmpInst>(I)) {
1162     // Sort the operand value numbers so x<y and y>x get the same value
1163     // number.
1164     CmpInst::Predicate Predicate = CI->getPredicate();
1165     if (shouldSwapOperands(E->getOperand(0), E->getOperand(1))) {
1166       E->swapOperands(0, 1);
1167       Predicate = CmpInst::getSwappedPredicate(Predicate);
1168     }
1169     E->setOpcode((CI->getOpcode() << 8) | Predicate);
1170     // TODO: 25% of our time is spent in simplifyCmpInst with pointer operands
1171     assert(I->getOperand(0)->getType() == I->getOperand(1)->getType() &&
1172            "Wrong types on cmp instruction");
1173     assert((E->getOperand(0)->getType() == I->getOperand(0)->getType() &&
1174             E->getOperand(1)->getType() == I->getOperand(1)->getType()));
1175     Value *V =
1176         simplifyCmpInst(Predicate, E->getOperand(0), E->getOperand(1), Q);
1177     if (auto Simplified = checkExprResults(E, I, V))
1178       return Simplified;
1179   } else if (isa<SelectInst>(I)) {
1180     if (isa<Constant>(E->getOperand(0)) ||
1181         E->getOperand(1) == E->getOperand(2)) {
1182       assert(E->getOperand(1)->getType() == I->getOperand(1)->getType() &&
1183              E->getOperand(2)->getType() == I->getOperand(2)->getType());
1184       Value *V = simplifySelectInst(E->getOperand(0), E->getOperand(1),
1185                                     E->getOperand(2), Q);
1186       if (auto Simplified = checkExprResults(E, I, V))
1187         return Simplified;
1188     }
1189   } else if (I->isBinaryOp()) {
1190     Value *V =
1191         simplifyBinOp(E->getOpcode(), E->getOperand(0), E->getOperand(1), Q);
1192     if (auto Simplified = checkExprResults(E, I, V))
1193       return Simplified;
1194   } else if (auto *CI = dyn_cast<CastInst>(I)) {
1195     Value *V =
1196         simplifyCastInst(CI->getOpcode(), E->getOperand(0), CI->getType(), Q);
1197     if (auto Simplified = checkExprResults(E, I, V))
1198       return Simplified;
1199   } else if (auto *GEPI = dyn_cast<GetElementPtrInst>(I)) {
1200     Value *V = simplifyGEPInst(GEPI->getSourceElementType(), *E->op_begin(),
1201                                ArrayRef(std::next(E->op_begin()), E->op_end()),
1202                                GEPI->isInBounds(), Q);
1203     if (auto Simplified = checkExprResults(E, I, V))
1204       return Simplified;
1205   } else if (AllConstant) {
1206     // We don't bother trying to simplify unless all of the operands
1207     // were constant.
1208     // TODO: There are a lot of Simplify*'s we could call here, if we
1209     // wanted to.  The original motivating case for this code was a
1210     // zext i1 false to i8, which we don't have an interface to
1211     // simplify (IE there is no SimplifyZExt).
1212 
1213     SmallVector<Constant *, 8> C;
1214     for (Value *Arg : E->operands())
1215       C.emplace_back(cast<Constant>(Arg));
1216 
1217     if (Value *V = ConstantFoldInstOperands(I, C, DL, TLI))
1218       if (auto Simplified = checkExprResults(E, I, V))
1219         return Simplified;
1220   }
1221   return ExprResult::some(E);
1222 }
1223 
1224 const AggregateValueExpression *
1225 NewGVN::createAggregateValueExpression(Instruction *I) const {
1226   if (auto *II = dyn_cast<InsertValueInst>(I)) {
1227     auto *E = new (ExpressionAllocator)
1228         AggregateValueExpression(I->getNumOperands(), II->getNumIndices());
1229     setBasicExpressionInfo(I, E);
1230     E->allocateIntOperands(ExpressionAllocator);
1231     std::copy(II->idx_begin(), II->idx_end(), int_op_inserter(E));
1232     return E;
1233   } else if (auto *EI = dyn_cast<ExtractValueInst>(I)) {
1234     auto *E = new (ExpressionAllocator)
1235         AggregateValueExpression(I->getNumOperands(), EI->getNumIndices());
1236     setBasicExpressionInfo(EI, E);
1237     E->allocateIntOperands(ExpressionAllocator);
1238     std::copy(EI->idx_begin(), EI->idx_end(), int_op_inserter(E));
1239     return E;
1240   }
1241   llvm_unreachable("Unhandled type of aggregate value operation");
1242 }
1243 
1244 const DeadExpression *NewGVN::createDeadExpression() const {
1245   // DeadExpression has no arguments and all DeadExpressions are the same,
1246   // so we only need one of them.
1247   return SingletonDeadExpression;
1248 }
1249 
1250 const VariableExpression *NewGVN::createVariableExpression(Value *V) const {
1251   auto *E = new (ExpressionAllocator) VariableExpression(V);
1252   E->setOpcode(V->getValueID());
1253   return E;
1254 }
1255 
1256 const Expression *NewGVN::createVariableOrConstant(Value *V) const {
1257   if (auto *C = dyn_cast<Constant>(V))
1258     return createConstantExpression(C);
1259   return createVariableExpression(V);
1260 }
1261 
1262 const ConstantExpression *NewGVN::createConstantExpression(Constant *C) const {
1263   auto *E = new (ExpressionAllocator) ConstantExpression(C);
1264   E->setOpcode(C->getValueID());
1265   return E;
1266 }
1267 
1268 const UnknownExpression *NewGVN::createUnknownExpression(Instruction *I) const {
1269   auto *E = new (ExpressionAllocator) UnknownExpression(I);
1270   E->setOpcode(I->getOpcode());
1271   return E;
1272 }
1273 
1274 const CallExpression *
1275 NewGVN::createCallExpression(CallInst *CI, const MemoryAccess *MA) const {
1276   // FIXME: Add operand bundles for calls.
1277   auto *E =
1278       new (ExpressionAllocator) CallExpression(CI->getNumOperands(), CI, MA);
1279   setBasicExpressionInfo(CI, E);
1280   if (CI->isCommutative()) {
1281     // Ensure that commutative intrinsics that only differ by a permutation
1282     // of their operands get the same value number by sorting the operand value
1283     // numbers.
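         // Illustrative sketch (hypothetical IR; @llvm.smax is just one example of
         // a commutative intrinsic):
         //   %a = call i32 @llvm.smax.i32(i32 %x, i32 %y)
         //   %b = call i32 @llvm.smax.i32(i32 %y, i32 %x)
         // Both calls end up with their operands in the same order and thus the
         // same value number.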
1284     assert(CI->getNumOperands() >= 2 && "Unsupported commutative intrinsic!");
1285     if (shouldSwapOperands(E->getOperand(0), E->getOperand(1)))
1286       E->swapOperands(0, 1);
1287   }
1288   return E;
1289 }
1290 
1291 // Return true if some equivalent of instruction Inst dominates instruction U.
1292 bool NewGVN::someEquivalentDominates(const Instruction *Inst,
1293                                      const Instruction *U) const {
1294   auto *CC = ValueToClass.lookup(Inst);
1295   // This must be an instruction because we are only called from phi nodes
1296   // in the case that the value it needs to check against is an instruction.
1297 
1298   // The most likely candidates for dominance are the leader and the next leader.
1299   // The leader or next leader will dominate in all cases where there is an
1300   // equivalent that is higher up in the dom tree.
1301   // We can't *only* check them, however, because the
1302   // dominator tree could have an infinite number of non-dominating siblings
1303   // with instructions that are in the right congruence class.
1304   //       A
1305   // B C D E F G
1306   // |
1307   // H
1308   // Instruction U could be in H,  with equivalents in every other sibling.
1309   // Depending on the rpo order picked, the leader could be the equivalent in
1310   // any of these siblings.
1311   if (!CC)
1312     return false;
1313   if (alwaysAvailable(CC->getLeader()))
1314     return true;
1315   if (DT->dominates(cast<Instruction>(CC->getLeader()), U))
1316     return true;
1317   if (CC->getNextLeader().first &&
1318       DT->dominates(cast<Instruction>(CC->getNextLeader().first), U))
1319     return true;
1320   return llvm::any_of(*CC, [&](const Value *Member) {
1321     return Member != CC->getLeader() &&
1322            DT->dominates(cast<Instruction>(Member), U);
1323   });
1324 }
1325 
1326 // See if we have a congruence class and leader for this operand, and if so,
1327 // return it. Otherwise, return the operand itself.
1328 Value *NewGVN::lookupOperandLeader(Value *V) const {
1329   CongruenceClass *CC = ValueToClass.lookup(V);
1330   if (CC) {
1331     // Everything in TOP is represented by poison, as it can be any value.
1332     // We do have to make sure we get the type right though, so we can't set the
1333     // RepLeader to poison.
1334     if (CC == TOPClass)
1335       return PoisonValue::get(V->getType());
1336     return CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader();
1337   }
1338 
1339   return V;
1340 }
1341 
1342 const MemoryAccess *NewGVN::lookupMemoryLeader(const MemoryAccess *MA) const {
1343   auto *CC = getMemoryClass(MA);
1344   assert(CC->getMemoryLeader() &&
1345          "Every MemoryAccess should be mapped to a congruence class with a "
1346          "representative memory access");
1347   return CC->getMemoryLeader();
1348 }
1349 
1350 // Return true if the MemoryAccess is really equivalent to everything. This is
1351 // equivalent to the lattice value "TOP" in most lattices.  This is the initial
1352 // state of all MemoryAccesses.
1353 bool NewGVN::isMemoryAccessTOP(const MemoryAccess *MA) const {
1354   return getMemoryClass(MA) == TOPClass;
1355 }
1356 
1357 LoadExpression *NewGVN::createLoadExpression(Type *LoadType, Value *PointerOp,
1358                                              LoadInst *LI,
1359                                              const MemoryAccess *MA) const {
1360   auto *E =
1361       new (ExpressionAllocator) LoadExpression(1, LI, lookupMemoryLeader(MA));
1362   E->allocateOperands(ArgRecycler, ExpressionAllocator);
1363   E->setType(LoadType);
1364 
1365   // Give stores and loads the same opcode so they value number together.
1366   E->setOpcode(0);
1367   E->op_push_back(PointerOp);
1368 
1369   // TODO: Value number heap versions. We may be able to discover
1370   // things alias analysis can't on its own (IE that a store and a
1371   // load have the same value, and thus, it isn't clobbering the load).
1372   return E;
1373 }
1374 
1375 const StoreExpression *
1376 NewGVN::createStoreExpression(StoreInst *SI, const MemoryAccess *MA) const {
1377   auto *StoredValueLeader = lookupOperandLeader(SI->getValueOperand());
1378   auto *E = new (ExpressionAllocator)
1379       StoreExpression(SI->getNumOperands(), SI, StoredValueLeader, MA);
1380   E->allocateOperands(ArgRecycler, ExpressionAllocator);
1381   E->setType(SI->getValueOperand()->getType());
1382 
1383   // Give stores and loads the same opcode so they value number together.
1384   E->setOpcode(0);
1385   E->op_push_back(lookupOperandLeader(SI->getPointerOperand()));
1386 
1387   // TODO: Value number heap versions. We may be able to discover
1388   // things alias analysis can't on its own (IE that a store and a
1389   // load have the same value, and thus, it isn't clobbering the load).
1390   return E;
1391 }
1392 
1393 const Expression *NewGVN::performSymbolicStoreEvaluation(Instruction *I) const {
1394   // Unlike loads, we never try to eliminate stores, so we do not check if they
1395   // are simple and avoid value numbering them.
1396   auto *SI = cast<StoreInst>(I);
1397   auto *StoreAccess = getMemoryAccess(SI);
1398   // Get the expression, if any, for the RHS of the MemoryDef.
1399   const MemoryAccess *StoreRHS = StoreAccess->getDefiningAccess();
1400   if (EnableStoreRefinement)
1401     StoreRHS = MSSAWalker->getClobberingMemoryAccess(StoreAccess);
1402   // If we bypassed the use-def chains, make sure we add a use.
1403   StoreRHS = lookupMemoryLeader(StoreRHS);
1404   if (StoreRHS != StoreAccess->getDefiningAccess())
1405     addMemoryUsers(StoreRHS, StoreAccess);
1406   // If we are defined by ourselves, use the live on entry def.
1407   if (StoreRHS == StoreAccess)
1408     StoreRHS = MSSA->getLiveOnEntryDef();
1409 
1410   if (SI->isSimple()) {
1411     // See if we are defined by a previous store expression that already has a
1412     // value, and it's the same value as our current store. FIXME: Right now, we
1413     // only do this for simple stores; we should expand to cover memcpys, etc.
1414     const auto *LastStore = createStoreExpression(SI, StoreRHS);
1415     const auto *LastCC = ExpressionToClass.lookup(LastStore);
1416     // We really want to check whether the expression we matched was a store.
1417     // There is no easy way to do that directly. However, we can check that the
1418     // class we found has a store, which, assuming the value numbering state is
1419     // not corrupt, is sufficient, because we must also be equivalent to that
1420     // store's expression for it to be in the same class as this store.
1421     if (LastCC && LastCC->getStoredValue() == LastStore->getStoredValue())
1422       return LastStore;
1423     // Also check if our value operand is defined by a load of the same memory
1424     // location, and the memory state is the same as it was then (otherwise, it
1425     // could have been overwritten later. See test32 in
1426     // transforms/DeadStoreElimination/simple.ll).
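         // Illustrative sketch (hypothetical IR):
         //   %v = load i32, ptr %p
         //   store i32 %v, ptr %p
         // If the memory state feeding the load is still the state reaching the
         // store, the store writes back what is already there and can reuse that
         // state.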
1427     if (auto *LI = dyn_cast<LoadInst>(LastStore->getStoredValue()))
1428       if ((lookupOperandLeader(LI->getPointerOperand()) ==
1429            LastStore->getOperand(0)) &&
1430           (lookupMemoryLeader(getMemoryAccess(LI)->getDefiningAccess()) ==
1431            StoreRHS))
1432         return LastStore;
1433     deleteExpression(LastStore);
1434   }
1435 
1436   // If the store is not equivalent to anything, value number it as a store that
1437   // produces a unique memory state (instead of using its MemoryUse, we use
1438   // its MemoryDef).
1439   return createStoreExpression(SI, StoreAccess);
1440 }
1441 
1442 // See if we can extract the value that would be loaded from a prior load, a
1443 // store, or a memory intrinsic.
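     // Illustrative sketch (hypothetical IR): a load fully covered by an earlier
     // constant store may be coerced to a constant, possibly at an offset and a
     // different type (subject to the usual layout/endianness rules):
     //   store i32 305419896, ptr %p    ; 0x12345678
     //   %x = load i16, ptr %p          ; can become a constant expression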
1444 const Expression *
1445 NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
1446                                     LoadInst *LI, Instruction *DepInst,
1447                                     MemoryAccess *DefiningAccess) const {
1448   assert((!LI || LI->isSimple()) && "Not a simple load");
1449   if (auto *DepSI = dyn_cast<StoreInst>(DepInst)) {
1450     // Can't forward from non-atomic to atomic without violating memory model.
1451     // Also don't need to coerce if they are the same type; we will just
1452     // propagate.
1453     if (LI->isAtomic() > DepSI->isAtomic() ||
1454         LoadType == DepSI->getValueOperand()->getType())
1455       return nullptr;
1456     int Offset = analyzeLoadFromClobberingStore(LoadType, LoadPtr, DepSI, DL);
1457     if (Offset >= 0) {
1458       if (auto *C = dyn_cast<Constant>(
1459               lookupOperandLeader(DepSI->getValueOperand()))) {
1460         if (Constant *Res = getConstantValueForLoad(C, Offset, LoadType, DL)) {
1461           LLVM_DEBUG(dbgs() << "Coercing load from store " << *DepSI
1462                             << " to constant " << *Res << "\n");
1463           return createConstantExpression(Res);
1464         }
1465       }
1466     }
1467   } else if (auto *DepLI = dyn_cast<LoadInst>(DepInst)) {
1468     // Can't forward from non-atomic to atomic without violating memory model.
1469     if (LI->isAtomic() > DepLI->isAtomic())
1470       return nullptr;
1471     int Offset = analyzeLoadFromClobberingLoad(LoadType, LoadPtr, DepLI, DL);
1472     if (Offset >= 0) {
1473       // We can coerce a constant load into a load.
1474       if (auto *C = dyn_cast<Constant>(lookupOperandLeader(DepLI)))
1475         if (auto *PossibleConstant =
1476                 getConstantValueForLoad(C, Offset, LoadType, DL)) {
1477           LLVM_DEBUG(dbgs() << "Coercing load from load " << *LI
1478                             << " to constant " << *PossibleConstant << "\n");
1479           return createConstantExpression(PossibleConstant);
1480         }
1481     }
1482   } else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
1483     int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL);
1484     if (Offset >= 0) {
1485       if (auto *PossibleConstant =
1486               getConstantMemInstValueForLoad(DepMI, Offset, LoadType, DL)) {
1487         LLVM_DEBUG(dbgs() << "Coercing load from meminst " << *DepMI
1488                           << " to constant " << *PossibleConstant << "\n");
1489         return createConstantExpression(PossibleConstant);
1490       }
1491     }
1492   }
1493 
1494   // All of the below are only true if the loaded pointer is produced
1495   // by the dependent instruction.
1496   if (LoadPtr != lookupOperandLeader(DepInst) &&
1497       !AA->isMustAlias(LoadPtr, DepInst))
1498     return nullptr;
1499   // If this load really doesn't depend on anything, then we must be loading an
1500   // undef value.  This can happen when loading from a fresh allocation with no
1501   // intervening stores, for example.  Note that this is only true in the case
1502   // that the result of the allocation is pointer equal to the load ptr.
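       // Illustrative sketch (hypothetical IR):
       //   %a = alloca i32
       //   %v = load i32, ptr %a   ; nothing has been stored, so %v is undef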
1503   if (isa<AllocaInst>(DepInst)) {
1504     return createConstantExpression(UndefValue::get(LoadType));
1505   }
1506   // If this load occurs right after a lifetime begin,
1507   // then the loaded value is undefined.
1508   else if (auto *II = dyn_cast<IntrinsicInst>(DepInst)) {
1509     if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1510       return createConstantExpression(UndefValue::get(LoadType));
1511   } else if (auto *InitVal =
1512                  getInitialValueOfAllocation(DepInst, TLI, LoadType))
1513       return createConstantExpression(InitVal);
1514 
1515   return nullptr;
1516 }
1517 
1518 const Expression *NewGVN::performSymbolicLoadEvaluation(Instruction *I) const {
1519   auto *LI = cast<LoadInst>(I);
1520 
1521   // We can eliminate in favor of non-simple loads, but we won't be able to
1522   // eliminate the loads themselves.
1523   if (!LI->isSimple())
1524     return nullptr;
1525 
1526   Value *LoadAddressLeader = lookupOperandLeader(LI->getPointerOperand());
1527   // Loading from an undef address is UB, so value the load as poison.
1528   if (isa<UndefValue>(LoadAddressLeader))
1529     return createConstantExpression(PoisonValue::get(LI->getType()));
1530   MemoryAccess *OriginalAccess = getMemoryAccess(I);
1531   MemoryAccess *DefiningAccess =
1532       MSSAWalker->getClobberingMemoryAccess(OriginalAccess);
1533 
1534   if (!MSSA->isLiveOnEntryDef(DefiningAccess)) {
1535     if (auto *MD = dyn_cast<MemoryDef>(DefiningAccess)) {
1536       Instruction *DefiningInst = MD->getMemoryInst();
1537       // If the defining instruction is not reachable, replace with poison.
1538       if (!ReachableBlocks.count(DefiningInst->getParent()))
1539         return createConstantExpression(PoisonValue::get(LI->getType()));
1540       // This will handle stores and memory insts.  We only do this if the
1541       // defining access has a different type, or it is a pointer produced by
1542       // certain memory operations that cause the memory to have a fixed value
1543       // (IE things like calloc).
1544       if (const auto *CoercionResult =
1545               performSymbolicLoadCoercion(LI->getType(), LoadAddressLeader, LI,
1546                                           DefiningInst, DefiningAccess))
1547         return CoercionResult;
1548     }
1549   }
1550 
1551   const auto *LE = createLoadExpression(LI->getType(), LoadAddressLeader, LI,
1552                                         DefiningAccess);
1553   // If our MemoryLeader is not our defining access, add a use to the
1554   // MemoryLeader, so that we get reprocessed when it changes.
1555   if (LE->getMemoryLeader() != DefiningAccess)
1556     addMemoryUsers(LE->getMemoryLeader(), OriginalAccess);
1557   return LE;
1558 }
1559 
1560 NewGVN::ExprResult
1561 NewGVN::performSymbolicPredicateInfoEvaluation(IntrinsicInst *I) const {
1562   auto *PI = PredInfo->getPredicateInfoFor(I);
1563   if (!PI)
1564     return ExprResult::none();
1565 
1566   LLVM_DEBUG(dbgs() << "Found predicate info from instruction!\n");
1567 
1568   const std::optional<PredicateConstraint> &Constraint = PI->getConstraint();
1569   if (!Constraint)
1570     return ExprResult::none();
1571 
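       // Illustrative sketch (hypothetical IR in the shape PredicateInfo produces):
       //   %c = icmp eq i32 %x, 42
       //   br i1 %c, label %then, label %else
       // then:
       //   %x.0 = call i32 @llvm.ssa.copy.i32(i32 %x)
       // The constraint attached to %x.0 is "eq 42", so the code below can value
       // %x.0 as the constant 42.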
1572   CmpInst::Predicate Predicate = Constraint->Predicate;
1573   Value *CmpOp0 = I->getOperand(0);
1574   Value *CmpOp1 = Constraint->OtherOp;
1575 
1576   Value *FirstOp = lookupOperandLeader(CmpOp0);
1577   Value *SecondOp = lookupOperandLeader(CmpOp1);
1578   Value *AdditionallyUsedValue = CmpOp0;
1579 
1580   // Sort the ops.
1581   if (shouldSwapOperandsForIntrinsic(FirstOp, SecondOp, I)) {
1582     std::swap(FirstOp, SecondOp);
1583     Predicate = CmpInst::getSwappedPredicate(Predicate);
1584     AdditionallyUsedValue = CmpOp1;
1585   }
1586 
1587   if (Predicate == CmpInst::ICMP_EQ)
1588     return ExprResult::some(createVariableOrConstant(FirstOp),
1589                             AdditionallyUsedValue, PI);
1590 
1591   // Handle the special case of floating point.
1592   if (Predicate == CmpInst::FCMP_OEQ && isa<ConstantFP>(FirstOp) &&
1593       !cast<ConstantFP>(FirstOp)->isZero())
1594     return ExprResult::some(createConstantExpression(cast<Constant>(FirstOp)),
1595                             AdditionallyUsedValue, PI);
1596 
1597   return ExprResult::none();
1598 }
1599 
1600 // Evaluate read-only and pure calls, and create an expression result.
1601 NewGVN::ExprResult NewGVN::performSymbolicCallEvaluation(Instruction *I) const {
1602   auto *CI = cast<CallInst>(I);
1603   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1604     // Intrinsics with the returned attribute are copies of arguments.
1605     if (auto *ReturnedValue = II->getReturnedArgOperand()) {
1606       if (II->getIntrinsicID() == Intrinsic::ssa_copy)
1607         if (auto Res = performSymbolicPredicateInfoEvaluation(II))
1608           return Res;
1609       return ExprResult::some(createVariableOrConstant(ReturnedValue));
1610     }
1611   }
1612 
1613   // FIXME: Currently, calls which may access the thread id may be considered
1614   // as not accessing memory. But this is problematic for coroutines, since
1615   // coroutines may resume in a different thread. So we disable the
1616   // optimization here for correctness. However, it may block many other
1617   // correct optimizations. Revert this workaround when we can detect the
1618   // memory accessing kind more precisely.
1620   if (CI->getFunction()->isPresplitCoroutine())
1621     return ExprResult::none();
1622 
1623   // Do not combine convergent calls since they implicitly depend on the set of
1624   // threads that is currently executing, and they might be in different basic
1625   // blocks.
1626   if (CI->isConvergent())
1627     return ExprResult::none();
1628 
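       // Illustrative sketch (hypothetical IR; @f is a placeholder): two calls with
       // identical arguments that observe the same memory state can share a value
       // number:
       //   %a = call i32 @f(i32 %x)    ; @f only reads memory
       //   %b = call i32 @f(i32 %x)
       // assuming no clobbering write between them.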
1629   if (AA->doesNotAccessMemory(CI)) {
1630     return ExprResult::some(
1631         createCallExpression(CI, TOPClass->getMemoryLeader()));
1632   } else if (AA->onlyReadsMemory(CI)) {
1633     if (auto *MA = MSSA->getMemoryAccess(CI)) {
1634       auto *DefiningAccess = MSSAWalker->getClobberingMemoryAccess(MA);
1635       return ExprResult::some(createCallExpression(CI, DefiningAccess));
1636     } else // MSSA determined that CI does not access memory.
1637       return ExprResult::some(
1638           createCallExpression(CI, TOPClass->getMemoryLeader()));
1639   }
1640   return ExprResult::none();
1641 }
1642 
1643 // Retrieve the memory class for a given MemoryAccess.
1644 CongruenceClass *NewGVN::getMemoryClass(const MemoryAccess *MA) const {
1645   auto *Result = MemoryAccessToClass.lookup(MA);
1646   assert(Result && "Should have found memory class");
1647   return Result;
1648 }
1649 
1650 // Update the MemoryAccess equivalence table to say that From now maps to
1651 // NewClass, and return true if this is different from what already existed.
1652 bool NewGVN::setMemoryClass(const MemoryAccess *From,
1653                             CongruenceClass *NewClass) {
1654   assert(NewClass &&
1655          "Every MemoryAccess should be getting mapped to a non-null class");
1656   LLVM_DEBUG(dbgs() << "Setting " << *From);
1657   LLVM_DEBUG(dbgs() << " equivalent to congruence class ");
1658   LLVM_DEBUG(dbgs() << NewClass->getID()
1659                     << " with current MemoryAccess leader ");
1660   LLVM_DEBUG(dbgs() << *NewClass->getMemoryLeader() << "\n");
1661 
1662   auto LookupResult = MemoryAccessToClass.find(From);
1663   bool Changed = false;
1664   // If it's already in the table, see if the value changed.
1665   if (LookupResult != MemoryAccessToClass.end()) {
1666     auto *OldClass = LookupResult->second;
1667     if (OldClass != NewClass) {
1668       // If this is a phi, we have to handle memory member updates.
1669       if (auto *MP = dyn_cast<MemoryPhi>(From)) {
1670         OldClass->memory_erase(MP);
1671         NewClass->memory_insert(MP);
1672         // This may have killed the class if it had no non-memory members
1673         if (OldClass->getMemoryLeader() == From) {
1674           if (OldClass->definesNoMemory()) {
1675             OldClass->setMemoryLeader(nullptr);
1676           } else {
1677             OldClass->setMemoryLeader(getNextMemoryLeader(OldClass));
1678             LLVM_DEBUG(dbgs() << "Memory class leader change for class "
1679                               << OldClass->getID() << " to "
1680                               << *OldClass->getMemoryLeader()
1681                               << " due to removal of a memory member " << *From
1682                               << "\n");
1683             markMemoryLeaderChangeTouched(OldClass);
1684           }
1685         }
1686       }
1687       // It wasn't equivalent before, and now it is.
1688       LookupResult->second = NewClass;
1689       Changed = true;
1690     }
1691   }
1692 
1693   return Changed;
1694 }
1695 
1696 // Determine if an instruction is cycle-free.  That means the values in the
1697 // instruction don't depend on any expressions that can change value as a result
1698 // of the instruction.  For example, a non-cycle-free instruction would be
1699 // v = phi(0, v+1).
1700 bool NewGVN::isCycleFree(const Instruction *I) const {
1701   // In order to compute cycle-freeness, we do SCC finding on the instruction,
1702   // and see what kind of SCC it ends up in.  If it is a singleton, it is
1703   // cycle-free.  If it is not in a singleton, it is only cycle free if the
1704   // other members are all phi nodes (as they do not compute anything, they are
1705   // copies).
1706   auto ICS = InstCycleState.lookup(I);
1707   if (ICS == ICS_Unknown) {
1708     SCCFinder.Start(I);
1709     auto &SCC = SCCFinder.getComponentFor(I);
1710     // It's cycle free if it's size 1 or the SCC is *only* phi nodes.
1711     if (SCC.size() == 1)
1712       InstCycleState.insert({I, ICS_CycleFree});
1713     else {
1714       bool AllPhis = llvm::all_of(SCC, [](const Value *V) {
1715         return isa<PHINode>(V) || isCopyOfAPHI(V);
1716       });
1717       ICS = AllPhis ? ICS_CycleFree : ICS_Cycle;
1718       for (const auto *Member : SCC)
1719         if (auto *MemberPhi = dyn_cast<PHINode>(Member))
1720           InstCycleState.insert({MemberPhi, ICS});
1721     }
1722   }
1723   if (ICS == ICS_Cycle)
1724     return false;
1725   return true;
1726 }
1727 
1728 // Evaluate PHI nodes symbolically and create an expression result.
1729 const Expression *
1730 NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
1731                                      Instruction *I,
1732                                      BasicBlock *PHIBlock) const {
1733   // True if one of the incoming phi edges is a backedge.
1734   bool HasBackedge = false;
1735   // OriginalOpsConstant tracks whether all of the *original* phi operands were
1736   // constant. This is really shorthand for "this phi cannot cycle due to forward
1737   // propagation", as any change in the value of the phi is guaranteed not to
1738   // later change the value of the phi. IE it can't be v = phi(undef, v+1).
1739   bool OriginalOpsConstant = true;
1740   auto *E = cast<PHIExpression>(createPHIExpression(
1741       PHIOps, I, PHIBlock, HasBackedge, OriginalOpsConstant));
1742   // We match the semantics of SimplifyPhiNode from InstructionSimplify here.
1743   // See if all arguments are the same.
1744   // We track if any were undef because they need special handling.
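       // Illustrative sketch (hypothetical IR):
       //   %p = phi i32 [ %v, %bb1 ], [ %v, %bb2 ]      ; simplifies to %v
       //   %q = phi i32 [ %v, %bb1 ], [ undef, %bb2 ]   ; only if %v cannot be poison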
1745   bool HasUndef = false, HasPoison = false;
1746   auto Filtered = make_filter_range(E->operands(), [&](Value *Arg) {
1747     if (isa<PoisonValue>(Arg)) {
1748       HasPoison = true;
1749       return false;
1750     }
1751     if (isa<UndefValue>(Arg)) {
1752       HasUndef = true;
1753       return false;
1754     }
1755     return true;
1756   });
1757   // If we are left with no operands, it's dead.
1758   if (Filtered.empty()) {
1759     // If it has undef or poison at this point, it means there are no non-undef
1760     // and no non-poison arguments, and thus, the phi node must be undef or poison.
1761     if (HasUndef) {
1762       LLVM_DEBUG(
1763           dbgs() << "PHI Node " << *I
1764                  << " has no non-undef arguments, valuing it as undef\n");
1765       return createConstantExpression(UndefValue::get(I->getType()));
1766     }
1767     if (HasPoison) {
1768       LLVM_DEBUG(
1769           dbgs() << "PHI Node " << *I
1770                  << " has no non-poison arguments, valuing it as poison\n");
1771       return createConstantExpression(PoisonValue::get(I->getType()));
1772     }
1773 
1774     LLVM_DEBUG(dbgs() << "No arguments of PHI node " << *I << " are live\n");
1775     deleteExpression(E);
1776     return createDeadExpression();
1777   }
1778   Value *AllSameValue = *(Filtered.begin());
1779   ++Filtered.begin();
1780   // Can't use std::equal here, sadly, because filter.begin moves.
1781   if (llvm::all_of(Filtered, [&](Value *Arg) { return Arg == AllSameValue; })) {
1782     // Can't fold phi(undef, X) -> X unless X can't be poison (thus X is undef
1783     // in the worst case).
1784     if (HasUndef && !isGuaranteedNotToBePoison(AllSameValue, AC, nullptr, DT))
1785       return E;
1786 
1787     // In LLVM's non-standard representation of phi nodes, it's possible to have
1788     // phi nodes with cycles (IE dependent on other phis that are .... dependent
1789     // on the original phi node), especially in weird CFGs where some arguments
1790     // are unreachable, or uninitialized along certain paths.  This can cause
1791     // infinite loops during evaluation. We work around this by not trying to
1792     // really evaluate them independently, but instead using a variable
1793     // expression to say if one is equivalent to the other.
1794     // We also special case undef/poison, so that if we have an undef, we can't
1795     // use the common value unless it dominates the phi block.
1796     if (HasPoison || HasUndef) {
1797       // If we have undef and at least one other value, this is really a
1798       // multivalued phi, and we need to know if it's cycle free in order to
1799       // evaluate whether we can ignore the undef.  The other parts of this are
1800       // just shortcuts.  If there is no backedge, or all operands are
1801       // constants, it also must be cycle free.
1802       if (HasBackedge && !OriginalOpsConstant &&
1803           !isa<UndefValue>(AllSameValue) && !isCycleFree(I))
1804         return E;
1805 
1806       // Only have to check for instructions
1807       if (auto *AllSameInst = dyn_cast<Instruction>(AllSameValue))
1808         if (!someEquivalentDominates(AllSameInst, I))
1809           return E;
1810     }
1811     // Can't simplify to something that comes later in the iteration.
1812     // Otherwise, when and if it changes congruence class, we will never catch
1813     // up. We will always be a class behind it.
1814     if (isa<Instruction>(AllSameValue) &&
1815         InstrToDFSNum(AllSameValue) > InstrToDFSNum(I))
1816       return E;
1817     NumGVNPhisAllSame++;
1818     LLVM_DEBUG(dbgs() << "Simplified PHI node " << *I << " to " << *AllSameValue
1819                       << "\n");
1820     deleteExpression(E);
1821     return createVariableOrConstant(AllSameValue);
1822   }
1823   return E;
1824 }
1825 
1826 const Expression *
1827 NewGVN::performSymbolicAggrValueEvaluation(Instruction *I) const {
1828   if (auto *EI = dyn_cast<ExtractValueInst>(I)) {
1829     auto *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
1830     if (WO && EI->getNumIndices() == 1 && *EI->idx_begin() == 0)
1831       // EI is an extract from one of our with.overflow intrinsics. Synthesize
1832       // a semantically equivalent expression instead of an extract value
1833       // expression.
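           // Illustrative sketch (hypothetical IR):
           //   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
           //   %v = extractvalue { i32, i1 } %s, 0
           // Here %v is valued as (add i32 %a, %b), so it can value number together
           // with a plain add of the same operands.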
1834       return createBinaryExpression(WO->getBinaryOp(), EI->getType(),
1835                                     WO->getLHS(), WO->getRHS(), I);
1836   }
1837 
1838   return createAggregateValueExpression(I);
1839 }
1840 
1841 NewGVN::ExprResult NewGVN::performSymbolicCmpEvaluation(Instruction *I) const {
1842   assert(isa<CmpInst>(I) && "Expected a cmp instruction.");
1843 
1844   auto *CI = cast<CmpInst>(I);
1845   // See if our operands are equal to those of a previous predicate, and if so,
1846   // if it implies true or false.
1847   auto Op0 = lookupOperandLeader(CI->getOperand(0));
1848   auto Op1 = lookupOperandLeader(CI->getOperand(1));
1849   auto OurPredicate = CI->getPredicate();
1850   if (shouldSwapOperands(Op0, Op1)) {
1851     std::swap(Op0, Op1);
1852     OurPredicate = CI->getSwappedPredicate();
1853   }
1854 
1855   // Avoid processing the same info twice.
1856   const PredicateBase *LastPredInfo = nullptr;
1857   // See if we know something about the comparison itself, like it is the target
1858   // of an assume.
1859   auto *CmpPI = PredInfo->getPredicateInfoFor(I);
1860   if (isa_and_nonnull<PredicateAssume>(CmpPI))
1861     return ExprResult::some(
1862         createConstantExpression(ConstantInt::getTrue(CI->getType())));
1863 
1864   if (Op0 == Op1) {
1865     // This condition does not depend on predicates, no need to add users
1866     if (CI->isTrueWhenEqual())
1867       return ExprResult::some(
1868           createConstantExpression(ConstantInt::getTrue(CI->getType())));
1869     else if (CI->isFalseWhenEqual())
1870       return ExprResult::some(
1871           createConstantExpression(ConstantInt::getFalse(CI->getType())));
1872   }
1873 
1874   // NOTE: Because we are comparing both operands here and below, and using
1875   // previous comparisons, we rely on fact that predicateinfo knows to mark
1876   // comparisons that use renamed operands as users of the earlier comparisons.
1877   // It is *not* enough to just mark predicateinfo renamed operands as users of
1878   // the earlier comparisons, because the *other* operand may have changed in a
1879   // previous iteration.
1880   // Example:
1881   // icmp slt %a, %b
1882   // %b.0 = ssa.copy(%b)
1883   // false branch:
1884   // icmp slt %c, %b.0
1885 
1886   // %c and %a may start out equal, and thus, the code below will say the second
1887   // icmp is false.  %c may become equal to something else, and in that case the
1888   // second icmp *must* be reexamined, but would not be if only the renamed
1889   // operands are considered users of the icmp.
1890 
1891   // *Currently* we only check one level of comparisons back, and only mark one
1892   // level back as touched when changes happen.  If you modify this code to look
1893   // back farther through comparisons, you *must* mark the appropriate
1894   // comparisons as users in PredicateInfo.cpp, or you will cause bugs.  See if
1895   // we know something just from the operands themselves
1896 
1897   // See if our operands have predicate info, so that we may be able to derive
1898   // something from a previous comparison.
1899   for (const auto &Op : CI->operands()) {
1900     auto *PI = PredInfo->getPredicateInfoFor(Op);
1901     if (const auto *PBranch = dyn_cast_or_null<PredicateBranch>(PI)) {
1902       if (PI == LastPredInfo)
1903         continue;
1904       LastPredInfo = PI;
1905       // In phi of ops cases, we may have predicate info that we are evaluating
1906       // in a different context.
1907       if (!DT->dominates(PBranch->To, getBlockForValue(I)))
1908         continue;
1909       // TODO: Along the false edge, we may know more things too, like that an
1910       // icmp of the same operands is false.
1911       // TODO: We only handle actual comparison conditions below, not
1912       // and/or.
1914       auto *BranchCond = dyn_cast<CmpInst>(PBranch->Condition);
1915       if (!BranchCond)
1916         continue;
1917       auto *BranchOp0 = lookupOperandLeader(BranchCond->getOperand(0));
1918       auto *BranchOp1 = lookupOperandLeader(BranchCond->getOperand(1));
1919       auto BranchPredicate = BranchCond->getPredicate();
1920       if (shouldSwapOperands(BranchOp0, BranchOp1)) {
1921         std::swap(BranchOp0, BranchOp1);
1922         BranchPredicate = BranchCond->getSwappedPredicate();
1923       }
1924       if (BranchOp0 == Op0 && BranchOp1 == Op1) {
1925         if (PBranch->TrueEdge) {
1926           // If we know the previous predicate is true and we are in the true
1927           // edge then we may be implied true or false.
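               // For example (illustrative): if the dominating branch was taken on
               //   %c = icmp slt i32 %a, %b
               // then a later icmp sle on the same operand leaders is implied true,
               // and icmp sge on them is implied false.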
1928           if (CmpInst::isImpliedTrueByMatchingCmp(BranchPredicate,
1929                                                   OurPredicate)) {
1930             return ExprResult::some(
1931                 createConstantExpression(ConstantInt::getTrue(CI->getType())),
1932                 PI);
1933           }
1934 
1935           if (CmpInst::isImpliedFalseByMatchingCmp(BranchPredicate,
1936                                                    OurPredicate)) {
1937             return ExprResult::some(
1938                 createConstantExpression(ConstantInt::getFalse(CI->getType())),
1939                 PI);
1940           }
1941         } else {
1942           // Just handle the ne and eq cases, where if we have the same
1943           // operands, we may know something.
1944           if (BranchPredicate == OurPredicate) {
1945             // Same predicate, same ops, we know it was false, so this is false.
1946             return ExprResult::some(
1947                 createConstantExpression(ConstantInt::getFalse(CI->getType())),
1948                 PI);
1949           } else if (BranchPredicate ==
1950                      CmpInst::getInversePredicate(OurPredicate)) {
1951             // Inverse predicate, we know the other was false, so this is true.
1952             return ExprResult::some(
1953                 createConstantExpression(ConstantInt::getTrue(CI->getType())),
1954                 PI);
1955           }
1956         }
1957       }
1958     }
1959   }
1960   // createExpression will take care of simplifyCmpInst.
1961   return createExpression(I);
1962 }
1963 
1964 // Substitute and symbolize the value before value numbering.
1965 NewGVN::ExprResult
1966 NewGVN::performSymbolicEvaluation(Value *V,
1967                                   SmallPtrSetImpl<Value *> &Visited) const {
1968 
1969   const Expression *E = nullptr;
1970   if (auto *C = dyn_cast<Constant>(V))
1971     E = createConstantExpression(C);
1972   else if (isa<Argument>(V) || isa<GlobalVariable>(V)) {
1973     E = createVariableExpression(V);
1974   } else {
1975     // TODO: memory intrinsics.
1976     // TODO: Some day, we should do the forward propagation and reassociation
1977     // parts of the algorithm.
1978     auto *I = cast<Instruction>(V);
1979     switch (I->getOpcode()) {
1980     case Instruction::ExtractValue:
1981     case Instruction::InsertValue:
1982       E = performSymbolicAggrValueEvaluation(I);
1983       break;
1984     case Instruction::PHI: {
1985       SmallVector<ValPair, 3> Ops;
1986       auto *PN = cast<PHINode>(I);
1987       for (unsigned i = 0; i < PN->getNumOperands(); ++i)
1988         Ops.push_back({PN->getIncomingValue(i), PN->getIncomingBlock(i)});
1989       // Sort to ensure the invariant createPHIExpression requires is met.
1990       sortPHIOps(Ops);
1991       E = performSymbolicPHIEvaluation(Ops, I, getBlockForValue(I));
1992     } break;
1993     case Instruction::Call:
1994       return performSymbolicCallEvaluation(I);
1995       break;
1996     case Instruction::Store:
1997       E = performSymbolicStoreEvaluation(I);
1998       break;
1999     case Instruction::Load:
2000       E = performSymbolicLoadEvaluation(I);
2001       break;
2002     case Instruction::BitCast:
2003     case Instruction::AddrSpaceCast:
2004     case Instruction::Freeze:
2005       return createExpression(I);
2006       break;
2007     case Instruction::ICmp:
2008     case Instruction::FCmp:
2009       return performSymbolicCmpEvaluation(I);
2010       break;
2011     case Instruction::FNeg:
2012     case Instruction::Add:
2013     case Instruction::FAdd:
2014     case Instruction::Sub:
2015     case Instruction::FSub:
2016     case Instruction::Mul:
2017     case Instruction::FMul:
2018     case Instruction::UDiv:
2019     case Instruction::SDiv:
2020     case Instruction::FDiv:
2021     case Instruction::URem:
2022     case Instruction::SRem:
2023     case Instruction::FRem:
2024     case Instruction::Shl:
2025     case Instruction::LShr:
2026     case Instruction::AShr:
2027     case Instruction::And:
2028     case Instruction::Or:
2029     case Instruction::Xor:
2030     case Instruction::Trunc:
2031     case Instruction::ZExt:
2032     case Instruction::SExt:
2033     case Instruction::FPToUI:
2034     case Instruction::FPToSI:
2035     case Instruction::UIToFP:
2036     case Instruction::SIToFP:
2037     case Instruction::FPTrunc:
2038     case Instruction::FPExt:
2039     case Instruction::PtrToInt:
2040     case Instruction::IntToPtr:
2041     case Instruction::Select:
2042     case Instruction::ExtractElement:
2043     case Instruction::InsertElement:
2044     case Instruction::GetElementPtr:
2045       return createExpression(I);
2046       break;
2047     case Instruction::ShuffleVector:
2048       // FIXME: Add support for shufflevector to createExpression.
2049       return ExprResult::none();
2050     default:
2051       return ExprResult::none();
2052     }
2053   }
2054   return ExprResult::some(E);
2055 }
2056 
2057 // Look up a container of values/instructions in a map, and touch all the
2058 // instructions in the container.  Then erase the key from the map.
2059 template <typename Map, typename KeyType>
2060 void NewGVN::touchAndErase(Map &M, const KeyType &Key) {
2061   const auto Result = M.find_as(Key);
2062   if (Result != M.end()) {
2063     for (const typename Map::mapped_type::value_type Mapped : Result->second)
2064       TouchedInstructions.set(InstrToDFSNum(Mapped));
2065     M.erase(Result);
2066   }
2067 }
2068 
2069 void NewGVN::addAdditionalUsers(Value *To, Value *User) const {
2070   assert(User && To != User);
2071   if (isa<Instruction>(To))
2072     AdditionalUsers[To].insert(User);
2073 }
2074 
2075 void NewGVN::addAdditionalUsers(ExprResult &Res, Instruction *User) const {
2076   if (Res.ExtraDep && Res.ExtraDep != User)
2077     addAdditionalUsers(Res.ExtraDep, User);
2078   Res.ExtraDep = nullptr;
2079 
2080   if (Res.PredDep) {
2081     if (const auto *PBranch = dyn_cast<PredicateBranch>(Res.PredDep))
2082       PredicateToUsers[PBranch->Condition].insert(User);
2083     else if (const auto *PAssume = dyn_cast<PredicateAssume>(Res.PredDep))
2084       PredicateToUsers[PAssume->Condition].insert(User);
2085   }
2086   Res.PredDep = nullptr;
2087 }
2088 
2089 void NewGVN::markUsersTouched(Value *V) {
2090   // Now mark the users as touched.
2091   for (auto *User : V->users()) {
2092     assert(isa<Instruction>(User) && "Use of value not within an instruction?");
2093     TouchedInstructions.set(InstrToDFSNum(User));
2094   }
2095   touchAndErase(AdditionalUsers, V);
2096 }
2097 
2098 void NewGVN::addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const {
2099   LLVM_DEBUG(dbgs() << "Adding memory user " << *U << " to " << *To << "\n");
2100   MemoryToUsers[To].insert(U);
2101 }
2102 
2103 void NewGVN::markMemoryDefTouched(const MemoryAccess *MA) {
2104   TouchedInstructions.set(MemoryToDFSNum(MA));
2105 }
2106 
2107 void NewGVN::markMemoryUsersTouched(const MemoryAccess *MA) {
2108   if (isa<MemoryUse>(MA))
2109     return;
2110   for (const auto *U : MA->users())
2111     TouchedInstructions.set(MemoryToDFSNum(U));
2112   touchAndErase(MemoryToUsers, MA);
2113 }
2114 
2115 // Touch all the predicates that depend on this instruction.
2116 void NewGVN::markPredicateUsersTouched(Instruction *I) {
2117   touchAndErase(PredicateToUsers, I);
2118 }
2119 
2120 // Mark users affected by a memory leader change.
2121 void NewGVN::markMemoryLeaderChangeTouched(CongruenceClass *CC) {
2122   for (const auto *M : CC->memory())
2123     markMemoryDefTouched(M);
2124 }
2125 
2126 // Touch the instructions that need to be updated after a congruence class has a
2127 // leader change, and mark changed values.
2128 void NewGVN::markValueLeaderChangeTouched(CongruenceClass *CC) {
2129   for (auto *M : *CC) {
2130     if (auto *I = dyn_cast<Instruction>(M))
2131       TouchedInstructions.set(InstrToDFSNum(I));
2132     LeaderChanges.insert(M);
2133   }
2134 }
2135 
2136 // Given a range of things that have instruction DFS numbers, this will return
2137 // the member of the range with the smallest DFS number.
2138 template <class T, class Range>
2139 T *NewGVN::getMinDFSOfRange(const Range &R) const {
2140   std::pair<T *, unsigned> MinDFS = {nullptr, ~0U};
2141   for (const auto X : R) {
2142     auto DFSNum = InstrToDFSNum(X);
2143     if (DFSNum < MinDFS.second)
2144       MinDFS = {X, DFSNum};
2145   }
2146   return MinDFS.first;
2147 }
2148 
2149 // This function returns the MemoryAccess that should be the next leader of
2150 // congruence class CC, under the assumption that the current leader is going to
2151 // disappear.
2152 const MemoryAccess *NewGVN::getNextMemoryLeader(CongruenceClass *CC) const {
2153   // TODO: If this ends up too slow, we can maintain a next memory leader like we
2154   // do for regular leaders.
2155   // Make sure there will be a leader to find.
2156   assert(!CC->definesNoMemory() && "Can't get next leader if there is none");
2157   if (CC->getStoreCount() > 0) {
2158     if (auto *NL = dyn_cast_or_null<StoreInst>(CC->getNextLeader().first))
2159       return getMemoryAccess(NL);
2160     // Find the store with the minimum DFS number.
2161     auto *V = getMinDFSOfRange<Value>(make_filter_range(
2162         *CC, [&](const Value *V) { return isa<StoreInst>(V); }));
2163     return getMemoryAccess(cast<StoreInst>(V));
2164   }
2165   assert(CC->getStoreCount() == 0);
2166 
2167   // Given our assertion, hitting this part must mean
2168   // !CC->memory_empty()
2169   if (CC->memory_size() == 1)
2170     return *CC->memory_begin();
2171   return getMinDFSOfRange<const MemoryPhi>(CC->memory());
2172 }
2173 
2174 // This function returns the next value leader of a congruence class, under the
2175 // assumption that the current leader is going away.  This should end up being
2176 // the next most dominating member.
2177 Value *NewGVN::getNextValueLeader(CongruenceClass *CC) const {
2178   // We don't need to sort members if there is only 1, and we don't care about
2179   // sorting the TOP class because everything either gets out of it or is
2180   // unreachable.
2181 
2182   if (CC->size() == 1 || CC == TOPClass) {
2183     return *(CC->begin());
2184   } else if (CC->getNextLeader().first) {
2185     ++NumGVNAvoidedSortedLeaderChanges;
2186     return CC->getNextLeader().first;
2187   } else {
2188     ++NumGVNSortedLeaderChanges;
2189     // NOTE: If this ends up too slow, we can maintain a dual structure for
2190     // member testing/insertion, or keep things mostly sorted, and sort only
2191     // here, or use SparseBitVector or ....
2192     return getMinDFSOfRange<Value>(*CC);
2193   }
2194 }
2195 
2196 // Move a MemoryAccess, currently in OldClass, to NewClass, including updates to
2197 // the memory members, etc for the move.
2198 //
2199 // The invariants of this function are:
2200 //
2201 // - I must be moving to NewClass from OldClass
2202 // - The StoreCount of OldClass and NewClass is expected to have been updated
2203 //   for I already if it is a store.
2204 // - The OldClass memory leader has not been updated yet if I was the leader.
2205 void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I,
2206                                             MemoryAccess *InstMA,
2207                                             CongruenceClass *OldClass,
2208                                             CongruenceClass *NewClass) {
2209   // If the leader is I, and we had a representative MemoryAccess, it should
2210   // be the MemoryAccess of OldClass.
2211   assert((!InstMA || !OldClass->getMemoryLeader() ||
2212           OldClass->getLeader() != I ||
2213           MemoryAccessToClass.lookup(OldClass->getMemoryLeader()) ==
2214               MemoryAccessToClass.lookup(InstMA)) &&
2215          "Representative MemoryAccess mismatch");
2216   // First, see what happens to the new class
2217   if (!NewClass->getMemoryLeader()) {
2218     // Should be a new class, or a store becoming a leader of a new class.
2219     assert(NewClass->size() == 1 ||
2220            (isa<StoreInst>(I) && NewClass->getStoreCount() == 1));
2221     NewClass->setMemoryLeader(InstMA);
2222     // Mark it touched if we didn't just create a singleton
2223     LLVM_DEBUG(dbgs() << "Memory class leader change for class "
2224                       << NewClass->getID()
2225                       << " due to new memory instruction becoming leader\n");
2226     markMemoryLeaderChangeTouched(NewClass);
2227   }
2228   setMemoryClass(InstMA, NewClass);
2229   // Now, fixup the old class if necessary
2230   if (OldClass->getMemoryLeader() == InstMA) {
2231     if (!OldClass->definesNoMemory()) {
2232       OldClass->setMemoryLeader(getNextMemoryLeader(OldClass));
2233       LLVM_DEBUG(dbgs() << "Memory class leader change for class "
2234                         << OldClass->getID() << " to "
2235                         << *OldClass->getMemoryLeader()
2236                         << " due to removal of old leader " << *InstMA << "\n");
2237       markMemoryLeaderChangeTouched(OldClass);
2238     } else
2239       OldClass->setMemoryLeader(nullptr);
2240   }
2241 }
2242 
2243 // Move a value, currently in OldClass, to be part of NewClass
2244 // Update OldClass and NewClass for the move (including changing leaders, etc).
2245 void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E,
2246                                            CongruenceClass *OldClass,
2247                                            CongruenceClass *NewClass) {
2248   if (I == OldClass->getNextLeader().first)
2249     OldClass->resetNextLeader();
2250 
2251   OldClass->erase(I);
2252   NewClass->insert(I);
2253 
2254   if (NewClass->getLeader() != I)
2255     NewClass->addPossibleNextLeader({I, InstrToDFSNum(I)});
2256   // Handle our special casing of stores.
2257   if (auto *SI = dyn_cast<StoreInst>(I)) {
2258     OldClass->decStoreCount();
2259     // Okay, so when do we want to make a store a leader of a class?
2260     // If we have a store defined by an earlier load, we want the earlier load
2261     // to lead the class.
2262     // If we have a store defined by something else, we want the store to lead
2263     // the class so everything else gets the "something else" as a value.
2264     // If we have a store as the single member of the class, we want the store
2265     // as the leader
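         // Illustrative sketch (hypothetical IR):
         //   %v = load i32, ptr %p
         //   store i32 %v, ptr %p   ; joins the load's class; the load stays leader
         //   store i32 %x, ptr %q   ; equivalent to nothing earlier; the store leads
         //                          ; and %x becomes the class's stored value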
2266     if (NewClass->getStoreCount() == 0 && !NewClass->getStoredValue()) {
2267       // If it's a store expression we are using, it means we are not equivalent
2268       // to something earlier.
2269       if (auto *SE = dyn_cast<StoreExpression>(E)) {
2270         NewClass->setStoredValue(SE->getStoredValue());
2271         markValueLeaderChangeTouched(NewClass);
2272         // Shift the new class leader to be the store
2273         LLVM_DEBUG(dbgs() << "Changing leader of congruence class "
2274                           << NewClass->getID() << " from "
2275                           << *NewClass->getLeader() << " to  " << *SI
2276                           << " because store joined class\n");
2277         // If we changed the leader, we have to mark it changed because we don't
2278         // know what it will do to symbolic evaluation.
2279         NewClass->setLeader(SI);
2280       }
2281       // We rely on the code below handling the MemoryAccess change.
2282     }
2283     NewClass->incStoreCount();
2284   }
2285   // True if there are no memory instructions left in a class that had memory
2286   // instructions before.
2287 
2288   // If it's not a memory use, set the MemoryAccess equivalence
2289   auto *InstMA = dyn_cast_or_null<MemoryDef>(getMemoryAccess(I));
2290   if (InstMA)
2291     moveMemoryToNewCongruenceClass(I, InstMA, OldClass, NewClass);
2292   ValueToClass[I] = NewClass;
2293   // See if we destroyed the class or need to swap leaders.
2294   if (OldClass->empty() && OldClass != TOPClass) {
2295     if (OldClass->getDefiningExpr()) {
2296       LLVM_DEBUG(dbgs() << "Erasing expression " << *OldClass->getDefiningExpr()
2297                         << " from table\n");
2298       // We erase it as an exact expression to make sure we don't just erase an
2299       // equivalent one.
2300       auto Iter = ExpressionToClass.find_as(
2301           ExactEqualsExpression(*OldClass->getDefiningExpr()));
2302       if (Iter != ExpressionToClass.end())
2303         ExpressionToClass.erase(Iter);
2304 #ifdef EXPENSIVE_CHECKS
2305       assert(
2306           (*OldClass->getDefiningExpr() != *E || ExpressionToClass.lookup(E)) &&
2307           "We erased the expression we just inserted, which should not happen");
2308 #endif
2309     }
2310   } else if (OldClass->getLeader() == I) {
2311     // When the leader changes, the value numbering of
2312     // everything may change due to symbolization changes, so we need to
2313     // reprocess.
2314     LLVM_DEBUG(dbgs() << "Value class leader change for class "
2315                       << OldClass->getID() << "\n");
2316     ++NumGVNLeaderChanges;
2317     // Destroy the stored value if there are no more stores to represent it.
2318     // Note that this is basically clean up for the expression removal that
2319     // happens below.  If we remove stores from a class, we may leave it as a
2320     // class of equivalent memory phis.
2321     if (OldClass->getStoreCount() == 0) {
2322       if (OldClass->getStoredValue())
2323         OldClass->setStoredValue(nullptr);
2324     }
2325     OldClass->setLeader(getNextValueLeader(OldClass));
2326     OldClass->resetNextLeader();
2327     markValueLeaderChangeTouched(OldClass);
2328   }
2329 }
2330 
2331 // For a given expression, mark the phi of ops instructions that could have
2332 // changed as a result.
2333 void NewGVN::markPhiOfOpsChanged(const Expression *E) {
2334   touchAndErase(ExpressionToPhiOfOps, E);
2335 }
2336 
2337 // Perform congruence finding on a given value numbering expression.
2338 void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) {
2339   // This is guaranteed to return something, since it will at least find
2340   // TOP.
2341 
2342   CongruenceClass *IClass = ValueToClass.lookup(I);
2343   assert(IClass && "Should have found an IClass");
2344   // Dead classes should have been eliminated from the mapping.
2345   assert(!IClass->isDead() && "Found a dead class");
2346 
2347   CongruenceClass *EClass = nullptr;
2348   if (const auto *VE = dyn_cast<VariableExpression>(E)) {
2349     EClass = ValueToClass.lookup(VE->getVariableValue());
2350   } else if (isa<DeadExpression>(E)) {
2351     EClass = TOPClass;
2352   }
2353   if (!EClass) {
2354     auto lookupResult = ExpressionToClass.insert({E, nullptr});
2355 
2356     // If it's not in the value table, create a new congruence class.
2357     if (lookupResult.second) {
2358       CongruenceClass *NewClass = createCongruenceClass(nullptr, E);
2359       auto place = lookupResult.first;
2360       place->second = NewClass;
2361 
2362       // Constants and variables should always be made the leader.
2363       if (const auto *CE = dyn_cast<ConstantExpression>(E)) {
2364         NewClass->setLeader(CE->getConstantValue());
2365       } else if (const auto *SE = dyn_cast<StoreExpression>(E)) {
2366         StoreInst *SI = SE->getStoreInst();
2367         NewClass->setLeader(SI);
2368         NewClass->setStoredValue(SE->getStoredValue());
2369         // The RepMemoryAccess field will be filled in properly by the
2370         // moveValueToNewCongruenceClass call.
2371       } else {
2372         NewClass->setLeader(I);
2373       }
2374       assert(!isa<VariableExpression>(E) &&
2375              "VariableExpression should have been handled already");
2376 
2377       EClass = NewClass;
2378       LLVM_DEBUG(dbgs() << "Created new congruence class for " << *I
2379                         << " using expression " << *E << " at "
2380                         << NewClass->getID() << " and leader "
2381                         << *(NewClass->getLeader()));
2382       if (NewClass->getStoredValue())
2383         LLVM_DEBUG(dbgs() << " and stored value "
2384                           << *(NewClass->getStoredValue()));
2385       LLVM_DEBUG(dbgs() << "\n");
2386     } else {
2387       EClass = lookupResult.first->second;
2388       if (isa<ConstantExpression>(E))
2389         assert((isa<Constant>(EClass->getLeader()) ||
2390                 (EClass->getStoredValue() &&
2391                  isa<Constant>(EClass->getStoredValue()))) &&
2392                "Any class with a constant expression should have a "
2393                "constant leader");
2394 
2395       assert(EClass && "Somehow don't have an eclass");
2396 
2397       assert(!EClass->isDead() && "We accidentally looked up a dead class");
2398     }
2399   }
2400   bool ClassChanged = IClass != EClass;
2401   bool LeaderChanged = LeaderChanges.erase(I);
2402   if (ClassChanged || LeaderChanged) {
2403     LLVM_DEBUG(dbgs() << "New class " << EClass->getID() << " for expression "
2404                       << *E << "\n");
2405     if (ClassChanged) {
2406       moveValueToNewCongruenceClass(I, E, IClass, EClass);
2407       markPhiOfOpsChanged(E);
2408     }
2409 
2410     markUsersTouched(I);
2411     if (MemoryAccess *MA = getMemoryAccess(I))
2412       markMemoryUsersTouched(MA);
2413     if (auto *CI = dyn_cast<CmpInst>(I))
2414       markPredicateUsersTouched(CI);
2415   }
2416   // If we changed the class of the store, we want to ensure nothing finds the
2417   // old store expression.  In particular, loads do not compare against the stored
2418   // value, so they will find old store expressions (and associated class
2419   // mappings) if we leave them in the table.
2420   if (ClassChanged && isa<StoreInst>(I)) {
2421     auto *OldE = ValueToExpression.lookup(I);
2422     // It could just be that the old class died. We don't want to erase it if we
2423     // just moved classes.
2424     if (OldE && isa<StoreExpression>(OldE) && *E != *OldE) {
2425       // Erase this as an exact expression to ensure we don't erase expressions
2426       // equivalent to it.
2427       auto Iter = ExpressionToClass.find_as(ExactEqualsExpression(*OldE));
2428       if (Iter != ExpressionToClass.end())
2429         ExpressionToClass.erase(Iter);
2430     }
2431   }
2432   ValueToExpression[I] = E;
2433 }
2434 
2435 // Process the fact that Edge (from, to) is reachable, including marking
2436 // any newly reachable blocks and instructions for processing.
2437 void NewGVN::updateReachableEdge(BasicBlock *From, BasicBlock *To) {
2438   // Check if the Edge was reachable before.
2439   if (ReachableEdges.insert({From, To}).second) {
2440     // If this block wasn't reachable before, all instructions are touched.
2441     if (ReachableBlocks.insert(To).second) {
2442       LLVM_DEBUG(dbgs() << "Block " << getBlockName(To)
2443                         << " marked reachable\n");
2444       const auto &InstRange = BlockInstRange.lookup(To);
2445       TouchedInstructions.set(InstRange.first, InstRange.second);
2446     } else {
2447       LLVM_DEBUG(dbgs() << "Block " << getBlockName(To)
2448                         << " was reachable, but new edge {"
2449                         << getBlockName(From) << "," << getBlockName(To)
2450                         << "} to it found\n");
2451 
2452       // We've made an edge reachable to an existing block, which may
2453       // impact predicates. Otherwise, only mark the phi nodes as touched, as
2454       // they are the only things that depend on new edges. Changes to their
2455       // values will be propagated to their users if necessary.
2456       if (MemoryAccess *MemPhi = getMemoryAccess(To))
2457         TouchedInstructions.set(InstrToDFSNum(MemPhi));
2458 
2459       // FIXME: We should just add a union op on a Bitvector and
2460       // SparseBitVector.  We can do it word by word faster than we are doing it
2461       // here.
2462       for (auto InstNum : RevisitOnReachabilityChange[To])
2463         TouchedInstructions.set(InstNum);
2464     }
2465   }
2466 }
2467 
2468 // Given a predicate condition (from a switch, cmp, or whatever), see if we
2469 // know some constant value for it already.
2470 Value *NewGVN::findConditionEquivalence(Value *Cond) const {
2471   auto Result = lookupOperandLeader(Cond);
2472   return isa<Constant>(Result) ? Result : nullptr;
2473 }
2474 
2475 // Process the outgoing edges of a block for reachability.
2476 void NewGVN::processOutgoingEdges(Instruction *TI, BasicBlock *B) {
2477   // Evaluate reachability of terminator instruction.
2478   Value *Cond;
2479   BasicBlock *TrueSucc, *FalseSucc;
2480   if (match(TI, m_Br(m_Value(Cond), TrueSucc, FalseSucc))) {
2481     Value *CondEvaluated = findConditionEquivalence(Cond);
2482     if (!CondEvaluated) {
2483       if (auto *I = dyn_cast<Instruction>(Cond)) {
2484         SmallPtrSet<Value *, 4> Visited;
2485         auto Res = performSymbolicEvaluation(I, Visited);
2486         if (const auto *CE = dyn_cast_or_null<ConstantExpression>(Res.Expr)) {
2487           CondEvaluated = CE->getConstantValue();
2488           addAdditionalUsers(Res, I);
2489         } else {
2490           // Did not use simplification result, no need to add the extra
2491           // dependency.
2492           Res.ExtraDep = nullptr;
2493         }
2494       } else if (isa<ConstantInt>(Cond)) {
2495         CondEvaluated = Cond;
2496       }
2497     }
2498     ConstantInt *CI;
2499     if (CondEvaluated && (CI = dyn_cast<ConstantInt>(CondEvaluated))) {
2500       if (CI->isOne()) {
2501         LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI
2502                           << " evaluated to true\n");
2503         updateReachableEdge(B, TrueSucc);
2504       } else if (CI->isZero()) {
2505         LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI
2506                           << " evaluated to false\n");
2507         updateReachableEdge(B, FalseSucc);
2508       }
2509     } else {
2510       updateReachableEdge(B, TrueSucc);
2511       updateReachableEdge(B, FalseSucc);
2512     }
2513   } else if (auto *SI = dyn_cast<SwitchInst>(TI)) {
2514     // For switches, propagate the case values into the case
2515     // destinations.
2516 
2517     Value *SwitchCond = SI->getCondition();
2518     Value *CondEvaluated = findConditionEquivalence(SwitchCond);
2519     // See if we were able to turn this switch statement into a constant.
2520     if (CondEvaluated && isa<ConstantInt>(CondEvaluated)) {
2521       auto *CondVal = cast<ConstantInt>(CondEvaluated);
2522       // We should be able to get the case value for this.
2523       auto Case = *SI->findCaseValue(CondVal);
2524       if (Case.getCaseSuccessor() == SI->getDefaultDest()) {
2525         // We proved the value is outside of the range of the case.
2526         // We can't do anything other than mark the default dest as reachable,
2527         // and go home.
2528         updateReachableEdge(B, SI->getDefaultDest());
2529         return;
2530       }
2531       // Now get where it goes and mark it reachable.
2532       BasicBlock *TargetBlock = Case.getCaseSuccessor();
2533       updateReachableEdge(B, TargetBlock);
2534     } else {
2535       for (unsigned i = 0, e = SI->getNumSuccessors(); i != e; ++i) {
2536         BasicBlock *TargetBlock = SI->getSuccessor(i);
2537         updateReachableEdge(B, TargetBlock);
2538       }
2539     }
2540   } else {
2541     // Otherwise this is either unconditional, or a type we have no
2542     // idea about. Just mark successors as reachable.
2543     for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
2544       BasicBlock *TargetBlock = TI->getSuccessor(i);
2545       updateReachableEdge(B, TargetBlock);
2546     }
2547 
2548     // This also may be a memory defining terminator, in which case, set it
2549     // equivalent only to itself.
2550     //
2551     auto *MA = getMemoryAccess(TI);
2552     if (MA && !isa<MemoryUse>(MA)) {
2553       auto *CC = ensureLeaderOfMemoryClass(MA);
2554       if (setMemoryClass(MA, CC))
2555         markMemoryUsersTouched(MA);
2556     }
2557   }
2558 }
2559 
2560 // Remove the PHI of Ops PHI for I
2561 void NewGVN::removePhiOfOps(Instruction *I, PHINode *PHITemp) {
2562   InstrDFS.erase(PHITemp);
2563   // It's still a temp instruction. We keep it in the array so it gets erased.
2564   // However, it's no longer used by I, or in the block.
2565   TempToBlock.erase(PHITemp);
2566   RealToTemp.erase(I);
2567   // We don't remove the users from the phi node uses. This wastes a little
2568   // time, but such is life.  We could use two sets to track which were there
2569   // at the start of NewGVN, and which were added, but right now the cost of
2570   // tracking is more than the cost of checking for more phi of ops.
2571 }
2572 
2573 // Add PHI Op in BB as a PHI of operations version of ExistingValue.
2574 void NewGVN::addPhiOfOps(PHINode *Op, BasicBlock *BB,
2575                          Instruction *ExistingValue) {
2576   InstrDFS[Op] = InstrToDFSNum(ExistingValue);
2577   AllTempInstructions.insert(Op);
2578   TempToBlock[Op] = BB;
2579   RealToTemp[ExistingValue] = Op;
2580   // Add all users to PHINodeUses, as they are now uses of the phi of ops phis
2581   // and may themselves be phi of ops.
2582   for (auto *U : ExistingValue->users())
2583     if (auto *UI = dyn_cast<Instruction>(U))
2584       PHINodeUses.insert(UI);
2585 }
2586 
2587 static bool okayForPHIOfOps(const Instruction *I) {
2588   if (!EnablePhiOfOps)
2589     return false;
2590   return isa<BinaryOperator>(I) || isa<SelectInst>(I) || isa<CmpInst>(I) ||
2591          isa<LoadInst>(I);
2592 }
2593 
2594 // Return true if this operand will be safe to use for phi of ops.
2595 //
2596 // The reason some operands are unsafe is that we are not trying to recursively
2597 // translate everything back through phi nodes.  We actually expect some lookups
2598 // of expressions to fail.  In particular, a lookup will fail when the
2599 // expression cannot exist in the predecessor.  This is true even if the
2600 // expression, as written, can be determined to be constant.
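     //
     // As an illustrative (not exhaustive) sketch of the checks in the body: an
     // operand that properly dominates PHIBlock is safe; an operand that is
     // itself a PHI in PHIBlock is not, since we do not recursively translate
     // through PHIs; and an operand that may read memory is conservatively
     // treated as unsafe, since a store in a loop could clobber the translated
     // value.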
2601 bool NewGVN::OpIsSafeForPHIOfOps(Value *V, const BasicBlock *PHIBlock,
2602                                  SmallPtrSetImpl<const Value *> &Visited) {
2603   SmallVector<Value *, 4> Worklist;
2604   Worklist.push_back(V);
2605   while (!Worklist.empty()) {
2606     auto *I = Worklist.pop_back_val();
2607     if (!isa<Instruction>(I))
2608       continue;
2609 
2610     auto OISIt = OpSafeForPHIOfOps.find(I);
2611     if (OISIt != OpSafeForPHIOfOps.end())
2612       return OISIt->second;
2613 
2614     // Keep walking until we either dominate the phi block, or hit a phi, or run
2615     // out of things to check.
2616     if (DT->properlyDominates(getBlockForValue(I), PHIBlock)) {
2617       OpSafeForPHIOfOps.insert({I, true});
2618       continue;
2619     }
2620     // PHI in the same block.
2621     if (isa<PHINode>(I) && getBlockForValue(I) == PHIBlock) {
2622       OpSafeForPHIOfOps.insert({I, false});
2623       return false;
2624     }
2625 
2626     auto *OrigI = cast<Instruction>(I);
2627     // When we hit an instruction that reads memory (load, call, etc.), we must
2628     // consider any store that may happen in the loop. For now, we assume the
2629     // worst: there is a store in the loop that aliases with this read.
2630     // The case where the load is outside the loop is already covered by the
2631     // dominator check above.
2632     // TODO: relax this condition
2633     if (OrigI->mayReadFromMemory())
2634       return false;
2635 
2636     // Check the operands of the current instruction.
2637     for (auto *Op : OrigI->operand_values()) {
2638       if (!isa<Instruction>(Op))
2639         continue;
2640       // Stop now if we find an unsafe operand.
2641       auto OISIt = OpSafeForPHIOfOps.find(OrigI);
2642       if (OISIt != OpSafeForPHIOfOps.end()) {
2643         if (!OISIt->second) {
2644           OpSafeForPHIOfOps.insert({I, false});
2645           return false;
2646         }
2647         continue;
2648       }
2649       if (!Visited.insert(Op).second)
2650         continue;
2651       Worklist.push_back(cast<Instruction>(Op));
2652     }
2653   }
2654   OpSafeForPHIOfOps.insert({V, true});
2655   return true;
2656 }
2657 
2658 // Try to find a leader for instruction TransInst, which is a phi translated
2659 // version of something in our original program.  Visited is used to ensure we
2660 // don't infinite loop during translations of cycles.  OrigInst is the
2661 // instruction in the original program, and PredBB is the predecessor we
2662 // translated it through.
2663 Value *NewGVN::findLeaderForInst(Instruction *TransInst,
2664                                  SmallPtrSetImpl<Value *> &Visited,
2665                                  MemoryAccess *MemAccess, Instruction *OrigInst,
2666                                  BasicBlock *PredBB) {
2667   unsigned IDFSNum = InstrToDFSNum(OrigInst);
2668   // Make sure it's marked as a temporary instruction.
2669   AllTempInstructions.insert(TransInst);
2670   // and make sure anything that tries to add its DFS number is
2671   // redirected to the instruction we are making a phi of ops
2672   // for.
2673   TempToBlock.insert({TransInst, PredBB});
2674   InstrDFS.insert({TransInst, IDFSNum});
2675 
2676   auto Res = performSymbolicEvaluation(TransInst, Visited);
2677   const Expression *E = Res.Expr;
2678   addAdditionalUsers(Res, OrigInst);
2679   InstrDFS.erase(TransInst);
2680   AllTempInstructions.erase(TransInst);
2681   TempToBlock.erase(TransInst);
2682   if (MemAccess)
2683     TempToMemory.erase(TransInst);
2684   if (!E)
2685     return nullptr;
2686   auto *FoundVal = findPHIOfOpsLeader(E, OrigInst, PredBB);
2687   if (!FoundVal) {
2688     ExpressionToPhiOfOps[E].insert(OrigInst);
2689     LLVM_DEBUG(dbgs() << "Cannot find phi of ops operand for " << *TransInst
2690                       << " in block " << getBlockName(PredBB) << "\n");
2691     return nullptr;
2692   }
2693   if (auto *SI = dyn_cast<StoreInst>(FoundVal))
2694     FoundVal = SI->getValueOperand();
2695   return FoundVal;
2696 }
2697 
2698 // When we see an instruction that is an op of phis, generate the equivalent phi
2699 // of ops form.
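     //
     // Illustrative sketch only (names are hypothetical): given the op of phis
     //   %p = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
     //   %r = add i32 %p, 1
     // we phi-translate the operation into each predecessor, look for leaders
     // of "add i32 %a, 1" in %bb1 and "add i32 %b, 1" in %bb2, and if both are
     // found, value number %r the same as the equivalent
     //   %phiofops = phi i32 [ <leader1>, %bb1 ], [ <leader2>, %bb2 ]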
2700 const Expression *
2701 NewGVN::makePossiblePHIOfOps(Instruction *I,
2702                              SmallPtrSetImpl<Value *> &Visited) {
2703   if (!okayForPHIOfOps(I))
2704     return nullptr;
2705 
2706   if (!Visited.insert(I).second)
2707     return nullptr;
2708   // For now, we require the instruction be cycle free because we don't
2709   // *always* create a phi of ops for instructions that could be done as phi
2710   // of ops, we only do it if we think it is useful.  If we did do it all the
2711   // time, we could remove the cycle free check.
2712   if (!isCycleFree(I))
2713     return nullptr;
2714 
2715   SmallPtrSet<const Value *, 8> ProcessedPHIs;
2716   // TODO: We don't do phi translation on memory accesses because it's
2717   // complicated. For a load, we'd need to be able to simulate a new memoryuse,
2718   // which we don't have a good way of doing ATM.
2719   auto *MemAccess = getMemoryAccess(I);
2720   // If the memory operation is defined by a memory operation in this block that
2721   // isn't a MemoryPhi, transforming the pointer backwards through a scalar phi
2722   // can't help, as it would still be killed by that memory operation.
2723   if (MemAccess && !isa<MemoryPhi>(MemAccess->getDefiningAccess()) &&
2724       MemAccess->getDefiningAccess()->getBlock() == I->getParent())
2725     return nullptr;
2726 
2727   // Convert op of phis to phi of ops
2728   SmallPtrSet<const Value *, 10> VisitedOps;
2729   SmallVector<Value *, 4> Ops(I->operand_values());
2730   BasicBlock *SamePHIBlock = nullptr;
2731   PHINode *OpPHI = nullptr;
2732   if (!DebugCounter::shouldExecute(PHIOfOpsCounter))
2733     return nullptr;
2734   for (auto *Op : Ops) {
2735     if (!isa<PHINode>(Op)) {
2736       auto *ValuePHI = RealToTemp.lookup(Op);
2737       if (!ValuePHI)
2738         continue;
2739       LLVM_DEBUG(dbgs() << "Found possible dependent phi of ops\n");
2740       Op = ValuePHI;
2741     }
2742     OpPHI = cast<PHINode>(Op);
2743     if (!SamePHIBlock) {
2744       SamePHIBlock = getBlockForValue(OpPHI);
2745     } else if (SamePHIBlock != getBlockForValue(OpPHI)) {
2746       LLVM_DEBUG(
2747           dbgs()
2748           << "PHIs for operands are not all in the same block, aborting\n");
2749       return nullptr;
2750     }
2751     // No point in doing this for one-operand phis.
2752     // Since all PHIs for operands must be in the same block, they must
2753     // have the same number of operands, so we can just abort.
2754     if (OpPHI->getNumOperands() == 1)
2755       return nullptr;
2756   }
2757 
2758   if (!OpPHI)
2759     return nullptr;
2760 
2761   SmallVector<ValPair, 4> PHIOps;
2762   SmallPtrSet<Value *, 4> Deps;
2763   auto *PHIBlock = getBlockForValue(OpPHI);
2764   RevisitOnReachabilityChange[PHIBlock].reset(InstrToDFSNum(I));
2765   for (unsigned PredNum = 0; PredNum < OpPHI->getNumOperands(); ++PredNum) {
2766     auto *PredBB = OpPHI->getIncomingBlock(PredNum);
2767     Value *FoundVal = nullptr;
2768     SmallPtrSet<Value *, 4> CurrentDeps;
2769     // We could just skip unreachable edges entirely but it's tricky to do
2770     // with rewriting existing phi nodes.
2771     if (ReachableEdges.count({PredBB, PHIBlock})) {
2772       // Clone the instruction, create an expression from it that is
2773       // translated back into the predecessor, and see if we have a leader.
2774       Instruction *ValueOp = I->clone();
2775       if (MemAccess)
2776         TempToMemory.insert({ValueOp, MemAccess});
2777       bool SafeForPHIOfOps = true;
2778       VisitedOps.clear();
2779       for (auto &Op : ValueOp->operands()) {
2780         auto *OrigOp = &*Op;
2781         // When these operands change, it could change whether there is a
2782         // leader for us or not, so we have to add additional users.
2783         if (isa<PHINode>(Op)) {
2784           Op = Op->DoPHITranslation(PHIBlock, PredBB);
2785           if (Op != OrigOp && Op != I)
2786             CurrentDeps.insert(Op);
2787         } else if (auto *ValuePHI = RealToTemp.lookup(Op)) {
2788           if (getBlockForValue(ValuePHI) == PHIBlock)
2789             Op = ValuePHI->getIncomingValueForBlock(PredBB);
2790         }
2791         // If we phi-translated the op, it must be safe.
2792         SafeForPHIOfOps =
2793             SafeForPHIOfOps &&
2794             (Op != OrigOp || OpIsSafeForPHIOfOps(Op, PHIBlock, VisitedOps));
2795       }
2796       // FIXME: For those things that are not safe we could generate
2797       // expressions all the way down, and see if this comes out to a
2798       // constant.  For anything where that is true, and unsafe, we should
2799       // have made a phi-of-ops (or value numbered it equivalent to something)
2800       // for the pieces already.
2801       FoundVal = !SafeForPHIOfOps ? nullptr
2802                                   : findLeaderForInst(ValueOp, Visited,
2803                                                       MemAccess, I, PredBB);
2804       ValueOp->deleteValue();
2805       if (!FoundVal) {
2806         // We failed to find a leader for the current ValueOp, but this might
2807         // change in case the translated operands change.
2808         if (SafeForPHIOfOps)
2809           for (auto *Dep : CurrentDeps)
2810             addAdditionalUsers(Dep, I);
2811 
2812         return nullptr;
2813       }
2814       Deps.insert(CurrentDeps.begin(), CurrentDeps.end());
2815     } else {
2816       LLVM_DEBUG(dbgs() << "Skipping phi of ops operand for incoming block "
2817                         << getBlockName(PredBB)
2818                         << " because the block is unreachable\n");
2819       FoundVal = PoisonValue::get(I->getType());
2820       RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I));
2821     }
2822 
2823     PHIOps.push_back({FoundVal, PredBB});
2824     LLVM_DEBUG(dbgs() << "Found phi of ops operand " << *FoundVal << " in "
2825                       << getBlockName(PredBB) << "\n");
2826   }
2827   for (auto *Dep : Deps)
2828     addAdditionalUsers(Dep, I);
2829   sortPHIOps(PHIOps);
2830   auto *E = performSymbolicPHIEvaluation(PHIOps, I, PHIBlock);
2831   if (isa<ConstantExpression>(E) || isa<VariableExpression>(E)) {
2832     LLVM_DEBUG(
2833         dbgs()
2834         << "Not creating real PHI of ops because it simplified to existing "
2835            "value or constant\n");
2836     // We have leaders for all operands, but do not create a real PHI node with
2837     // those leaders as operands, so the link between the operands and the
2838     // PHI-of-ops is not materialized in the IR. If any of those leaders
2839     // changes, the PHI-of-ops may also change, so we need to add the operands as
2840     // additional users.
2841     for (auto &O : PHIOps)
2842       addAdditionalUsers(O.first, I);
2843 
2844     return E;
2845   }
2846   auto *ValuePHI = RealToTemp.lookup(I);
2847   bool NewPHI = false;
2848   if (!ValuePHI) {
2849     ValuePHI =
2850         PHINode::Create(I->getType(), OpPHI->getNumOperands(), "phiofops");
2851     addPhiOfOps(ValuePHI, PHIBlock, I);
2852     NewPHI = true;
2853     NumGVNPHIOfOpsCreated++;
2854   }
2855   if (NewPHI) {
2856     for (auto PHIOp : PHIOps)
2857       ValuePHI->addIncoming(PHIOp.first, PHIOp.second);
2858   } else {
2859     TempToBlock[ValuePHI] = PHIBlock;
2860     unsigned int i = 0;
2861     for (auto PHIOp : PHIOps) {
2862       ValuePHI->setIncomingValue(i, PHIOp.first);
2863       ValuePHI->setIncomingBlock(i, PHIOp.second);
2864       ++i;
2865     }
2866   }
2867   RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I));
2868   LLVM_DEBUG(dbgs() << "Created phi of ops " << *ValuePHI << " for " << *I
2869                     << "\n");
2870 
2871   return E;
2872 }
2873 
2874 // The algorithm initially places the values of the routine in the TOP
2875 // congruence class. The leader of TOP is the undetermined value `poison`.
2876 // When the algorithm has finished, values still in TOP are unreachable.
2877 void NewGVN::initializeCongruenceClasses(Function &F) {
2878   NextCongruenceNum = 0;
2879 
2880   // Note that even though we use the live on entry def as a representative
2881   // MemoryAccess, it is *not* the same as the actual live on entry def. We
2882   // have no real equivalent to poison for MemoryAccesses, and so we really
2883   // should be checking whether the MemoryAccess is top if we want to know if it
2884   // is equivalent to everything.  Otherwise, what this really signifies is that
2885   // the access reaches all the way back to the beginning of the function.
2886 
2887   // Initialize all other instructions to be in TOP class.
2888   TOPClass = createCongruenceClass(nullptr, nullptr);
2889   TOPClass->setMemoryLeader(MSSA->getLiveOnEntryDef());
2890   //  The live on entry def gets put into its own class.
2891   MemoryAccessToClass[MSSA->getLiveOnEntryDef()] =
2892       createMemoryClass(MSSA->getLiveOnEntryDef());
2893 
2894   for (auto *DTN : nodes(DT)) {
2895     BasicBlock *BB = DTN->getBlock();
2896     // All MemoryAccesses are equivalent to live on entry to start. They must
2897     // be initialized to something so that initial changes are noticed. For
2898     // the maximal answer, we initialize them all to be the same as
2899     // liveOnEntry.
2900     auto *MemoryBlockDefs = MSSA->getBlockDefs(BB);
2901     if (MemoryBlockDefs)
2902       for (const auto &Def : *MemoryBlockDefs) {
2903         MemoryAccessToClass[&Def] = TOPClass;
2904         auto *MD = dyn_cast<MemoryDef>(&Def);
2905         // Insert the memory phis into the member list.
2906         if (!MD) {
2907           const MemoryPhi *MP = cast<MemoryPhi>(&Def);
2908           TOPClass->memory_insert(MP);
2909           MemoryPhiState.insert({MP, MPS_TOP});
2910         }
2911 
2912         if (MD && isa<StoreInst>(MD->getMemoryInst()))
2913           TOPClass->incStoreCount();
2914       }
2915 
2916     // FIXME: This is trying to discover which instructions are uses of phi
2917     // nodes.  We should move this into one of the myriad of places that walk
2918     // all the operands already.
2919     for (auto &I : *BB) {
2920       if (isa<PHINode>(&I))
2921         for (auto *U : I.users())
2922           if (auto *UInst = dyn_cast<Instruction>(U))
2923             if (InstrToDFSNum(UInst) != 0 && okayForPHIOfOps(UInst))
2924               PHINodeUses.insert(UInst);
2925       // Don't insert void terminators into the class. We don't value number
2926       // them, and they just end up sitting in TOP.
2927       if (I.isTerminator() && I.getType()->isVoidTy())
2928         continue;
2929       TOPClass->insert(&I);
2930       ValueToClass[&I] = TOPClass;
2931     }
2932   }
2933 
2934   // Initialize arguments to be in their own unique congruence classes
2935   for (auto &FA : F.args())
2936     createSingletonCongruenceClass(&FA);
2937 }
2938 
2939 void NewGVN::cleanupTables() {
2940   for (CongruenceClass *&CC : CongruenceClasses) {
2941     LLVM_DEBUG(dbgs() << "Congruence class " << CC->getID() << " has "
2942                       << CC->size() << " members\n");
2943     // Make sure we delete the congruence class (probably worth switching to
2944     // a unique_ptr at some point).
2945     delete CC;
2946     CC = nullptr;
2947   }
2948 
2949   // Destroy the value expressions
2950   SmallVector<Instruction *, 8> TempInst(AllTempInstructions.begin(),
2951                                          AllTempInstructions.end());
2952   AllTempInstructions.clear();
2953 
2954   // We have to drop all references for everything first, so there are no uses
2955   // left as we delete them.
2956   for (auto *I : TempInst) {
2957     I->dropAllReferences();
2958   }
2959 
2960   while (!TempInst.empty()) {
2961     auto *I = TempInst.pop_back_val();
2962     I->deleteValue();
2963   }
2964 
2965   ValueToClass.clear();
2966   ArgRecycler.clear(ExpressionAllocator);
2967   ExpressionAllocator.Reset();
2968   CongruenceClasses.clear();
2969   ExpressionToClass.clear();
2970   ValueToExpression.clear();
2971   RealToTemp.clear();
2972   AdditionalUsers.clear();
2973   ExpressionToPhiOfOps.clear();
2974   TempToBlock.clear();
2975   TempToMemory.clear();
2976   PHINodeUses.clear();
2977   OpSafeForPHIOfOps.clear();
2978   ReachableBlocks.clear();
2979   ReachableEdges.clear();
2980 #ifndef NDEBUG
2981   ProcessedCount.clear();
2982 #endif
2983   InstrDFS.clear();
2984   InstructionsToErase.clear();
2985   DFSToInstr.clear();
2986   BlockInstRange.clear();
2987   TouchedInstructions.clear();
2988   MemoryAccessToClass.clear();
2989   PredicateToUsers.clear();
2990   MemoryToUsers.clear();
2991   RevisitOnReachabilityChange.clear();
2992   IntrinsicInstPred.clear();
2993 }
2994 
2995 // Assign local DFS number mapping to instructions, and leave space for Value
2996 // PHI's.
2997 std::pair<unsigned, unsigned> NewGVN::assignDFSNumbers(BasicBlock *B,
2998                                                        unsigned Start) {
2999   unsigned End = Start;
3000   if (MemoryAccess *MemPhi = getMemoryAccess(B)) {
3001     InstrDFS[MemPhi] = End++;
3002     DFSToInstr.emplace_back(MemPhi);
3003   }
3004 
3005   // Then the real block goes next.
3006   for (auto &I : *B) {
3007     // There's no need to call isInstructionTriviallyDead more than once on
3008     // an instruction. Therefore, once we know that an instruction is dead
3009     // we change its DFS number so that it doesn't get value numbered.
3010     if (isInstructionTriviallyDead(&I, TLI)) {
3011       InstrDFS[&I] = 0;
3012       LLVM_DEBUG(dbgs() << "Skipping trivially dead instruction " << I << "\n");
3013       markInstructionForDeletion(&I);
3014       continue;
3015     }
3016     if (isa<PHINode>(&I))
3017       RevisitOnReachabilityChange[B].set(End);
3018     InstrDFS[&I] = End++;
3019     DFSToInstr.emplace_back(&I);
3020   }
3021 
3022   // All of the range functions take half-open ranges (open on the end side).
3023   // So we do not subtract one from count, because at this point it is one
3024   // greater than the last instruction.
3025   return std::make_pair(Start, End);
3026 }
3027 
3028 void NewGVN::updateProcessedCount(const Value *V) {
3029 #ifndef NDEBUG
3030   if (ProcessedCount.count(V) == 0) {
3031     ProcessedCount.insert({V, 1});
3032   } else {
3033     ++ProcessedCount[V];
3034     assert(ProcessedCount[V] < 100 &&
3035            "Seem to have processed the same Value a lot");
3036   }
3037 #endif
3038 }
3039 
3040 // Evaluate MemoryPhi nodes symbolically, just like PHI nodes
3041 void NewGVN::valueNumberMemoryPhi(MemoryPhi *MP) {
3042   // If all the arguments are the same, the MemoryPhi has the same value as the
3043   // argument.  Filter out unreachable blocks and self phis from our operands.
3044   // TODO: We could do cycle-checking on the memory phis to allow valueizing for
3045   // self-phi checking.
3046   const BasicBlock *PHIBlock = MP->getBlock();
3047   auto Filtered = make_filter_range(MP->operands(), [&](const Use &U) {
3048     return cast<MemoryAccess>(U) != MP &&
3049            !isMemoryAccessTOP(cast<MemoryAccess>(U)) &&
3050            ReachableEdges.count({MP->getIncomingBlock(U), PHIBlock});
3051   });
3052   // If all that is left is nothing, our MemoryPhi is poison. We keep it as
3053   // InitialClass.  Note: The only case where this should happen is if we have at
3054   // least one self-argument.
3055   if (Filtered.begin() == Filtered.end()) {
3056     if (setMemoryClass(MP, TOPClass))
3057       markMemoryUsersTouched(MP);
3058     return;
3059   }
3060 
3061   // Transform the remaining operands into operand leaders.
3062   // FIXME: mapped_iterator should have a range version.
3063   auto LookupFunc = [&](const Use &U) {
3064     return lookupMemoryLeader(cast<MemoryAccess>(U));
3065   };
3066   auto MappedBegin = map_iterator(Filtered.begin(), LookupFunc);
3067   auto MappedEnd = map_iterator(Filtered.end(), LookupFunc);
3068 
3069   // and now check if all the elements are equal.
3070   // Sadly, we can't use std::equal here since these are not random access iterators.
3071   const auto *AllSameValue = *MappedBegin;
3072   ++MappedBegin;
3073   bool AllEqual = std::all_of(
3074       MappedBegin, MappedEnd,
3075       [&AllSameValue](const MemoryAccess *V) { return V == AllSameValue; });
3076 
3077   if (AllEqual)
3078     LLVM_DEBUG(dbgs() << "Memory Phi value numbered to " << *AllSameValue
3079                       << "\n");
3080   else
3081     LLVM_DEBUG(dbgs() << "Memory Phi value numbered to itself\n");
3082   // If it's equal to something, it's in that class. Otherwise, it has to be in
3083   // a class where it is the leader (other things may be equivalent to it, but
3084   // it needs to start off in its own class, which means it must have been the
3085   // leader, and it can't have stopped being the leader because it was never
3086   // removed).
3087   CongruenceClass *CC =
3088       AllEqual ? getMemoryClass(AllSameValue) : ensureLeaderOfMemoryClass(MP);
3089   auto OldState = MemoryPhiState.lookup(MP);
3090   assert(OldState != MPS_Invalid && "Invalid memory phi state");
3091   auto NewState = AllEqual ? MPS_Equivalent : MPS_Unique;
3092   MemoryPhiState[MP] = NewState;
3093   if (setMemoryClass(MP, CC) || OldState != NewState)
3094     markMemoryUsersTouched(MP);
3095 }
3096 
3097 // Value number a single instruction, symbolically evaluating, performing
3098 // congruence finding, and updating mappings.
3099 void NewGVN::valueNumberInstruction(Instruction *I) {
3100   LLVM_DEBUG(dbgs() << "Processing instruction " << *I << "\n");
3101   if (!I->isTerminator()) {
3102     const Expression *Symbolized = nullptr;
3103     SmallPtrSet<Value *, 2> Visited;
3104     if (DebugCounter::shouldExecute(VNCounter)) {
3105       auto Res = performSymbolicEvaluation(I, Visited);
3106       Symbolized = Res.Expr;
3107       addAdditionalUsers(Res, I);
3108 
3109       // Make a phi of ops if necessary
3110       if (Symbolized && !isa<ConstantExpression>(Symbolized) &&
3111           !isa<VariableExpression>(Symbolized) && PHINodeUses.count(I)) {
3112         auto *PHIE = makePossiblePHIOfOps(I, Visited);
3113         // If we created a phi of ops, use it.
3114         // If we couldn't create one, make sure we don't leave one lying around
3115         if (PHIE) {
3116           Symbolized = PHIE;
3117         } else if (auto *Op = RealToTemp.lookup(I)) {
3118           removePhiOfOps(I, Op);
3119         }
3120       }
3121     } else {
3122       // Mark the instruction as unused so we don't value number it again.
3123       InstrDFS[I] = 0;
3124     }
3125     // If we couldn't come up with a symbolic expression, use the unknown
3126     // expression
3127     if (Symbolized == nullptr)
3128       Symbolized = createUnknownExpression(I);
3129     performCongruenceFinding(I, Symbolized);
3130   } else {
3131     // Handle terminators that return values. All of them produce values we
3132     // don't currently understand.  We don't place non-value producing
3133     // terminators in a class.
3134     if (!I->getType()->isVoidTy()) {
3135       auto *Symbolized = createUnknownExpression(I);
3136       performCongruenceFinding(I, Symbolized);
3137     }
3138     processOutgoingEdges(I, I->getParent());
3139   }
3140 }
3141 
3142 // Check if there is a path, using single or equal argument phi nodes, from
3143 // First to Second.
3144 bool NewGVN::singleReachablePHIPath(
3145     SmallPtrSet<const MemoryAccess *, 8> &Visited, const MemoryAccess *First,
3146     const MemoryAccess *Second) const {
3147   if (First == Second)
3148     return true;
3149   if (MSSA->isLiveOnEntryDef(First))
3150     return false;
3151 
3152   // This is not perfect, but as we're just verifying here, we can live with
3153   // the loss of precision. The real solution would be that of doing strongly
3154   // connected component finding in this routine, and it's probably not worth
3155   // the complexity for the time being. So, we just keep a set of visited
3156   // MemoryAccess and return true when we hit a cycle.
3157   if (!Visited.insert(First).second)
3158     return true;
3159 
3160   const auto *EndDef = First;
3161   for (const auto *ChainDef : optimized_def_chain(First)) {
3162     if (ChainDef == Second)
3163       return true;
3164     if (MSSA->isLiveOnEntryDef(ChainDef))
3165       return false;
3166     EndDef = ChainDef;
3167   }
3168   auto *MP = cast<MemoryPhi>(EndDef);
3169   auto ReachableOperandPred = [&](const Use &U) {
3170     return ReachableEdges.count({MP->getIncomingBlock(U), MP->getBlock()});
3171   };
3172   auto FilteredPhiArgs =
3173       make_filter_range(MP->operands(), ReachableOperandPred);
3174   SmallVector<const Value *, 32> OperandList;
3175   llvm::copy(FilteredPhiArgs, std::back_inserter(OperandList));
3176   bool Okay = all_equal(OperandList);
3177   if (Okay)
3178     return singleReachablePHIPath(Visited, cast<MemoryAccess>(OperandList[0]),
3179                                   Second);
3180   return false;
3181 }
3182 
3183 // Verify that the memory equivalence table makes sense relative to the
3184 // congruence classes.  Note that this checking is not perfect, and is currently
3185 // subject to very rare false negatives. It is only useful for
3186 // testing/debugging.
3187 void NewGVN::verifyMemoryCongruency() const {
3188 #ifndef NDEBUG
3189   // Verify that the memory equivalence table and memory member set match
3190   for (const auto *CC : CongruenceClasses) {
3191     if (CC == TOPClass || CC->isDead())
3192       continue;
3193     if (CC->getStoreCount() != 0) {
3194       assert((CC->getStoredValue() || !isa<StoreInst>(CC->getLeader())) &&
3195              "Any class with a store as a leader should have a "
3196              "representative stored value");
3197       assert(CC->getMemoryLeader() &&
3198              "Any congruence class with a store should have a "
3199              "representative access");
3200     }
3201 
3202     if (CC->getMemoryLeader())
3203       assert(MemoryAccessToClass.lookup(CC->getMemoryLeader()) == CC &&
3204              "Representative MemoryAccess does not appear to be reverse "
3205              "mapped properly");
3206     for (const auto *M : CC->memory())
3207       assert(MemoryAccessToClass.lookup(M) == CC &&
3208              "Memory member does not appear to be reverse mapped properly");
3209   }
3210 
3211   // Anything equivalent in the MemoryAccess table should be in the same
3212   // congruence class.
3213 
3214   // Filter out the unreachable and trivially dead entries, because they may
3215   // never have been updated if the instructions were not processed.
3216   auto ReachableAccessPred =
3217       [&](const std::pair<const MemoryAccess *, CongruenceClass *> Pair) {
3218         bool Result = ReachableBlocks.count(Pair.first->getBlock());
3219         if (!Result || MSSA->isLiveOnEntryDef(Pair.first) ||
3220             MemoryToDFSNum(Pair.first) == 0)
3221           return false;
3222         if (auto *MemDef = dyn_cast<MemoryDef>(Pair.first))
3223           return !isInstructionTriviallyDead(MemDef->getMemoryInst());
3224 
3225         // We could have phi nodes whose operands are all trivially dead,
3226         // so we don't process them.
3227         if (auto *MemPHI = dyn_cast<MemoryPhi>(Pair.first)) {
3228           for (const auto &U : MemPHI->incoming_values()) {
3229             if (auto *I = dyn_cast<Instruction>(&*U)) {
3230               if (!isInstructionTriviallyDead(I))
3231                 return true;
3232             }
3233           }
3234           return false;
3235         }
3236 
3237         return true;
3238       };
3239 
3240   auto Filtered = make_filter_range(MemoryAccessToClass, ReachableAccessPred);
3241   for (auto KV : Filtered) {
3242     if (auto *FirstMUD = dyn_cast<MemoryUseOrDef>(KV.first)) {
3243       auto *SecondMUD = dyn_cast<MemoryUseOrDef>(KV.second->getMemoryLeader());
3244       if (FirstMUD && SecondMUD) {
3245         SmallPtrSet<const MemoryAccess *, 8> VisitedMAS;
3246         assert((singleReachablePHIPath(VisitedMAS, FirstMUD, SecondMUD) ||
3247                 ValueToClass.lookup(FirstMUD->getMemoryInst()) ==
3248                     ValueToClass.lookup(SecondMUD->getMemoryInst())) &&
3249                "The instructions for these memory operations should have "
3250                "been in the same congruence class or reachable through"
3251                "a single argument phi");
3252       }
3253     } else if (auto *FirstMP = dyn_cast<MemoryPhi>(KV.first)) {
3254       // We can only sanely verify that MemoryDefs in the operand list all have
3255       // the same class.
3256       auto ReachableOperandPred = [&](const Use &U) {
3257         return ReachableEdges.count(
3258                    {FirstMP->getIncomingBlock(U), FirstMP->getBlock()}) &&
3259                isa<MemoryDef>(U);
3260 
3261       };
3262       // All arguments should be in the same class, ignoring unreachable arguments.
3263       auto FilteredPhiArgs =
3264           make_filter_range(FirstMP->operands(), ReachableOperandPred);
3265       SmallVector<const CongruenceClass *, 16> PhiOpClasses;
3266       std::transform(FilteredPhiArgs.begin(), FilteredPhiArgs.end(),
3267                      std::back_inserter(PhiOpClasses), [&](const Use &U) {
3268                        const MemoryDef *MD = cast<MemoryDef>(U);
3269                        return ValueToClass.lookup(MD->getMemoryInst());
3270                      });
3271       assert(all_equal(PhiOpClasses) &&
3272              "All MemoryPhi arguments should be in the same class");
3273     }
3274   }
3275 #endif
3276 }
3277 
3278 // Verify that the sparse propagation we did actually found the maximal fixpoint.
3279 // We do this by storing the value to class mapping, touching all instructions,
3280 // and redoing the iteration to see if anything changed.
3281 void NewGVN::verifyIterationSettled(Function &F) {
3282 #ifndef NDEBUG
3283   LLVM_DEBUG(dbgs() << "Beginning iteration verification\n");
3284   if (DebugCounter::isCounterSet(VNCounter))
3285     DebugCounter::setCounterValue(VNCounter, StartingVNCounter);
3286 
3287   // Note that we have to store the actual classes, as we may change existing
3288   // classes during iteration.  This is because our memory iteration propagation
3289   // is not perfect, and so may waste a little work.  But it should generate
3290   // exactly the same congruence classes we have now, with different IDs.
3291   std::map<const Value *, CongruenceClass> BeforeIteration;
3292 
3293   for (auto &KV : ValueToClass) {
3294     if (auto *I = dyn_cast<Instruction>(KV.first))
3295       // Skip unused/dead instructions.
3296       if (InstrToDFSNum(I) == 0)
3297         continue;
3298     BeforeIteration.insert({KV.first, *KV.second});
3299   }
3300 
3301   TouchedInstructions.set();
3302   TouchedInstructions.reset(0);
3303   OpSafeForPHIOfOps.clear();
3304   iterateTouchedInstructions();
3305   DenseSet<std::pair<const CongruenceClass *, const CongruenceClass *>>
3306       EqualClasses;
3307   for (const auto &KV : ValueToClass) {
3308     if (auto *I = dyn_cast<Instruction>(KV.first))
3309       // Skip unused/dead instructions.
3310       if (InstrToDFSNum(I) == 0)
3311         continue;
3312     // We could sink these uses, but I think this adds a bit of clarity here as
3313     // to what we are comparing.
3314     auto *BeforeCC = &BeforeIteration.find(KV.first)->second;
3315     auto *AfterCC = KV.second;
3316     // Note that the classes can't change at this point, so we memoize the set
3317     // that are equal.
3318     if (!EqualClasses.count({BeforeCC, AfterCC})) {
3319       assert(BeforeCC->isEquivalentTo(AfterCC) &&
3320              "Value number changed after main loop completed!");
3321       EqualClasses.insert({BeforeCC, AfterCC});
3322     }
3323   }
3324 #endif
3325 }
3326 
3327 // Verify that for each store expression in the expression to class mapping,
3328 // only the latest one appears, and stale ones do not.
3329 // Because loads do not use the stored value when doing equality with stores,
3330 // if we don't erase the old store expressions from the table, a load can find
3331 // a no-longer valid StoreExpression.
3332 void NewGVN::verifyStoreExpressions() const {
3333 #ifndef NDEBUG
3334   // This is the only use of this, and it's not worth defining a complicated
3335   // densemapinfo hash/equality function for it.
3336   std::set<
3337       std::pair<const Value *,
3338                 std::tuple<const Value *, const CongruenceClass *, Value *>>>
3339       StoreExpressionSet;
3340   for (const auto &KV : ExpressionToClass) {
3341     if (auto *SE = dyn_cast<StoreExpression>(KV.first)) {
3342       // Make sure a version that will conflict with loads is not already there
3343       auto Res = StoreExpressionSet.insert(
3344           {SE->getOperand(0), std::make_tuple(SE->getMemoryLeader(), KV.second,
3345                                               SE->getStoredValue())});
3346       bool Okay = Res.second;
3347       // It's okay to have the same expression already in there if it is
3348       // identical in nature.
3349       // This can happen when the leader of the stored value changes over time.
3350       if (!Okay)
3351         Okay = (std::get<1>(Res.first->second) == KV.second) &&
3352                (lookupOperandLeader(std::get<2>(Res.first->second)) ==
3353                 lookupOperandLeader(SE->getStoredValue()));
3354       assert(Okay && "Stored expression conflict exists in expression table");
3355       auto *ValueExpr = ValueToExpression.lookup(SE->getStoreInst());
3356       assert(ValueExpr && ValueExpr->equals(*SE) &&
3357              "StoreExpression in ExpressionToClass is not latest "
3358              "StoreExpression for value");
3359     }
3360   }
3361 #endif
3362 }
3363 
3364 // This is the main value numbering loop; it iterates over the initial touched
3365 // instruction set, propagating value numbers, marking things touched, etc.,
3366 // until the set of touched instructions is completely empty.
3367 void NewGVN::iterateTouchedInstructions() {
3368   uint64_t Iterations = 0;
3369   // Figure out where TouchedInstructions starts.
3370   int FirstInstr = TouchedInstructions.find_first();
3371   // Nothing set, nothing to iterate, just return.
3372   if (FirstInstr == -1)
3373     return;
3374   const BasicBlock *LastBlock = getBlockForValue(InstrFromDFSNum(FirstInstr));
3375   while (TouchedInstructions.any()) {
3376     ++Iterations;
3377     // Walk through all the instructions in all the blocks in RPO.
3378     // TODO: As we hit a new block, we should push and pop equalities into a
3379     // table lookupOperandLeader can use, to catch things PredicateInfo
3380     // might miss, like edge-only equivalences.
3381     for (unsigned InstrNum : TouchedInstructions.set_bits()) {
3382 
3383       // This instruction was found to be dead. We don't bother looking
3384       // at it again.
3385       if (InstrNum == 0) {
3386         TouchedInstructions.reset(InstrNum);
3387         continue;
3388       }
3389 
3390       Value *V = InstrFromDFSNum(InstrNum);
3391       const BasicBlock *CurrBlock = getBlockForValue(V);
3392 
3393       // If we hit a new block, do reachability processing.
3394       if (CurrBlock != LastBlock) {
3395         LastBlock = CurrBlock;
3396         bool BlockReachable = ReachableBlocks.count(CurrBlock);
3397         const auto &CurrInstRange = BlockInstRange.lookup(CurrBlock);
3398 
3399         // If it's not reachable, erase any touched instructions and move on.
3400         if (!BlockReachable) {
3401           TouchedInstructions.reset(CurrInstRange.first, CurrInstRange.second);
3402           LLVM_DEBUG(dbgs() << "Skipping instructions in block "
3403                             << getBlockName(CurrBlock)
3404                             << " because it is unreachable\n");
3405           continue;
3406         }
3407         updateProcessedCount(CurrBlock);
3408       }
3409       // Reset after processing (because we may mark ourselves as touched when
3410       // we propagate equalities).
3411       TouchedInstructions.reset(InstrNum);
3412 
3413       if (auto *MP = dyn_cast<MemoryPhi>(V)) {
3414         LLVM_DEBUG(dbgs() << "Processing MemoryPhi " << *MP << "\n");
3415         valueNumberMemoryPhi(MP);
3416       } else if (auto *I = dyn_cast<Instruction>(V)) {
3417         valueNumberInstruction(I);
3418       } else {
3419         llvm_unreachable("Should have been a MemoryPhi or Instruction");
3420       }
3421       updateProcessedCount(V);
3422     }
3423   }
3424   NumGVNMaxIterations = std::max(NumGVNMaxIterations.getValue(), Iterations);
3425 }
3426 
3427 // This is the main transformation entry point.
3428 bool NewGVN::runGVN() {
3429   if (DebugCounter::isCounterSet(VNCounter))
3430     StartingVNCounter = DebugCounter::getCounterValue(VNCounter);
3431   bool Changed = false;
3432   NumFuncArgs = F.arg_size();
3433   MSSAWalker = MSSA->getWalker();
3434   SingletonDeadExpression = new (ExpressionAllocator) DeadExpression();
3435 
3436   // Count number of instructions for sizing of hash tables, and come
3437   // up with a global dfs numbering for instructions.
3438   unsigned ICount = 1;
3439   // Add an empty instruction to account for the fact that we start at 1
3440   DFSToInstr.emplace_back(nullptr);
3441   // Note: We want ideal RPO traversal of the blocks, which is not quite the
3442   // same as dominator tree order, particularly with regard to whether backedges
3443   // get visited first or second, given a block with multiple successors.
3444   // If we visit in the wrong order, we will end up performing N times as many
3445   // iterations.
3446   // The dominator tree does guarantee that, for a given dom tree node, its
3447   // parent must occur before it in the RPO ordering. Thus, we only need to sort
3448   // the siblings.
3449   ReversePostOrderTraversal<Function *> RPOT(&F);
3450   unsigned Counter = 0;
3451   for (auto &B : RPOT) {
3452     auto *Node = DT->getNode(B);
3453     assert(Node && "RPO and Dominator tree should have same reachability");
3454     RPOOrdering[Node] = ++Counter;
3455   }
3456   // Sort dominator tree children arrays into RPO.
3457   for (auto &B : RPOT) {
3458     auto *Node = DT->getNode(B);
3459     if (Node->getNumChildren() > 1)
3460       llvm::sort(*Node, [&](const DomTreeNode *A, const DomTreeNode *B) {
3461         return RPOOrdering[A] < RPOOrdering[B];
3462       });
3463   }
3464 
3465   // Now a standard depth first ordering of the domtree is equivalent to RPO.
3466   for (auto *DTN : depth_first(DT->getRootNode())) {
3467     BasicBlock *B = DTN->getBlock();
3468     const auto &BlockRange = assignDFSNumbers(B, ICount);
3469     BlockInstRange.insert({B, BlockRange});
3470     ICount += BlockRange.second - BlockRange.first;
3471   }
3472   initializeCongruenceClasses(F);
3473 
3474   TouchedInstructions.resize(ICount);
3475   // Ensure we don't end up resizing the expressionToClass map, as
3476   // that can be quite expensive. At most, we have one expression per
3477   // instruction.
3478   ExpressionToClass.reserve(ICount);
3479 
3480   // Initialize the touched instructions to include the entry block.
3481   const auto &InstRange = BlockInstRange.lookup(&F.getEntryBlock());
3482   TouchedInstructions.set(InstRange.first, InstRange.second);
3483   LLVM_DEBUG(dbgs() << "Block " << getBlockName(&F.getEntryBlock())
3484                     << " marked reachable\n");
3485   ReachableBlocks.insert(&F.getEntryBlock());
3486 
3487   iterateTouchedInstructions();
3488   verifyMemoryCongruency();
3489   verifyIterationSettled(F);
3490   verifyStoreExpressions();
3491 
3492   Changed |= eliminateInstructions(F);
3493 
3494   // Delete all instructions marked for deletion.
3495   for (Instruction *ToErase : InstructionsToErase) {
3496     if (!ToErase->use_empty())
3497       ToErase->replaceAllUsesWith(PoisonValue::get(ToErase->getType()));
3498 
3499     assert(ToErase->getParent() &&
3500            "BB containing ToErase deleted unexpectedly!");
3501     ToErase->eraseFromParent();
3502   }
3503   Changed |= !InstructionsToErase.empty();
3504 
3505   // Delete all unreachable blocks.
3506   auto UnreachableBlockPred = [&](const BasicBlock &BB) {
3507     return !ReachableBlocks.count(&BB);
3508   };
3509 
3510   for (auto &BB : make_filter_range(F, UnreachableBlockPred)) {
3511     LLVM_DEBUG(dbgs() << "We believe block " << getBlockName(&BB)
3512                       << " is unreachable\n");
3513     deleteInstructionsInBlock(&BB);
3514     Changed = true;
3515   }
3516 
3517   cleanupTables();
3518   return Changed;
3519 }
3520 
3521 struct NewGVN::ValueDFS {
3522   int DFSIn = 0;
3523   int DFSOut = 0;
3524   int LocalNum = 0;
3525 
3526   // Only one of Def and U will be set.
3527   // The bool in the Def tells us whether the Def is the stored value of a
3528   // store.
3529   PointerIntPair<Value *, 1, bool> Def;
3530   Use *U = nullptr;
3531 
3532   bool operator<(const ValueDFS &Other) const {
3533     // It's not enough to compare any single field in isolation - we have sets
3534     // of fields that need to be evaluated together to give a proper ordering.
3535     // For example, if you have;
3536     // DFS (1, 3)
3537     // Val 0
3538     // DFS (1, 2)
3539     // Val 50
3540     // We want the second to be less than the first, but if we just go field
3541     // by field, we will get to Val 0 < Val 50 and say the first is less than
3542     // the second. We only want it to be less than if the DFS orders are equal.
3543     //
3544     // Each LLVM instruction only produces one value, and thus the lowest-level
3545     // differentiator that really matters for the stack (and what we use as a
3546     // replacement) is the local dfs number.
3547     // Everything else in the structure is instruction level, and only affects
3548     // the order in which we will replace operands of a given instruction.
3549     //
3550     // For a given instruction (IE things with equal dfsin, dfsout, localnum),
3551     // the order of replacement of uses does not matter.
3552     // IE given,
3553     //  a = 5
3554     //  b = a + a
3555     // When you hit b, you will have two valuedfs with the same dfsin, out, and
3556     // localnum.
3557     // The .val will be the same as well.
3558     // The .u's will be different.
3559     // You will replace both, and it does not matter what order you replace them
3560     // in (IE whether you replace operand 2, then operand 1, or operand 1, then
3561     // operand 2).
3562     // Similarly for the case of same dfsin, dfsout, localnum, but different
3563     // .val's
3564     //  a = 5
3565     //  b  = 6
3566     //  c = a + b
3567     // in c, we will have a valuedfs for a, and one for b, with everything the
3568     // same but .val and .u.
3569     // It does not matter what order we replace these operands in.
3570     // You will always end up with the same IR, and this is guaranteed.
3571     return std::tie(DFSIn, DFSOut, LocalNum, Def, U) <
3572            std::tie(Other.DFSIn, Other.DFSOut, Other.LocalNum, Other.Def,
3573                     Other.U);
3574   }
3575 };
3576 
3577 // This function converts the set of members for a congruence class from
3578 // values to sets of defs and uses with associated DFS info.  The total
3579 // number of reachable uses for each value is stored in UseCounts, and
3580 // instructions that seem dead (have no non-dead uses) are stored in
3581 // ProbablyDead.
3582 void NewGVN::convertClassToDFSOrdered(
3583     const CongruenceClass &Dense, SmallVectorImpl<ValueDFS> &DFSOrderedSet,
3584     DenseMap<const Value *, unsigned int> &UseCounts,
3585     SmallPtrSetImpl<Instruction *> &ProbablyDead) const {
3586   for (auto *D : Dense) {
3587     // First add the value.
3588     BasicBlock *BB = getBlockForValue(D);
3589     // Constants are handled prior to ever calling this function, so
3590     // we should only be left with instructions as members.
3591     assert(BB && "Should have figured out a basic block for value");
3592     ValueDFS VDDef;
3593     DomTreeNode *DomNode = DT->getNode(BB);
3594     VDDef.DFSIn = DomNode->getDFSNumIn();
3595     VDDef.DFSOut = DomNode->getDFSNumOut();
3596     // If it's a store, use the leader of the value operand, if it's always
3597     // available, or the value operand.  TODO: We could do dominance checks to
3598     // find a dominating leader, but not worth it ATM.
3599     if (auto *SI = dyn_cast<StoreInst>(D)) {
3600       auto Leader = lookupOperandLeader(SI->getValueOperand());
3601       if (alwaysAvailable(Leader)) {
3602         VDDef.Def.setPointer(Leader);
3603       } else {
3604         VDDef.Def.setPointer(SI->getValueOperand());
3605         VDDef.Def.setInt(true);
3606       }
3607     } else {
3608       VDDef.Def.setPointer(D);
3609     }
3610     assert(isa<Instruction>(D) &&
3611            "The dense set member should always be an instruction");
3612     Instruction *Def = cast<Instruction>(D);
3613     VDDef.LocalNum = InstrToDFSNum(D);
3614     DFSOrderedSet.push_back(VDDef);
3615     // If there is a phi node equivalent, add it
3616     if (auto *PN = RealToTemp.lookup(Def)) {
3617       auto *PHIE =
3618           dyn_cast_or_null<PHIExpression>(ValueToExpression.lookup(Def));
3619       if (PHIE) {
3620         VDDef.Def.setInt(false);
3621         VDDef.Def.setPointer(PN);
3622         VDDef.LocalNum = 0;
3623         DFSOrderedSet.push_back(VDDef);
3624       }
3625     }
3626 
3627     unsigned int UseCount = 0;
3628     // Now add the uses.
3629     for (auto &U : Def->uses()) {
3630       if (auto *I = dyn_cast<Instruction>(U.getUser())) {
3631         // Don't try to replace into dead uses
3632         if (InstructionsToErase.count(I))
3633           continue;
3634         ValueDFS VDUse;
3635         // Put the phi node uses in the incoming block.
3636         BasicBlock *IBlock;
3637         if (auto *P = dyn_cast<PHINode>(I)) {
3638           IBlock = P->getIncomingBlock(U);
3639           // Make phi node users appear last in the incoming block
3640           // they are from.
3641           VDUse.LocalNum = InstrDFS.size() + 1;
3642         } else {
3643           IBlock = getBlockForValue(I);
3644           VDUse.LocalNum = InstrToDFSNum(I);
3645         }
3646 
3647         // Skip uses in unreachable blocks, as we're going
3648         // to delete them.
3649         if (!ReachableBlocks.contains(IBlock))
3650           continue;
3651 
3652         DomTreeNode *DomNode = DT->getNode(IBlock);
3653         VDUse.DFSIn = DomNode->getDFSNumIn();
3654         VDUse.DFSOut = DomNode->getDFSNumOut();
3655         VDUse.U = &U;
3656         ++UseCount;
3657         DFSOrderedSet.emplace_back(VDUse);
3658       }
3659     }
3660 
3661     // If there are no uses, it's probably dead (but it may have side-effects,
3662     // so not definitely dead). Otherwise, store the number of uses so we can
3663     // track if it becomes dead later.
3664     if (UseCount == 0)
3665       ProbablyDead.insert(Def);
3666     else
3667       UseCounts[Def] = UseCount;
3668   }
3669 }
3670 
3671 // This function converts the set of members for a congruence class from values
3672 // to the set of defs for loads and stores, with associated DFS info.
3673 void NewGVN::convertClassToLoadsAndStores(
3674     const CongruenceClass &Dense,
3675     SmallVectorImpl<ValueDFS> &LoadsAndStores) const {
3676   for (auto *D : Dense) {
3677     if (!isa<LoadInst>(D) && !isa<StoreInst>(D))
3678       continue;
3679 
3680     BasicBlock *BB = getBlockForValue(D);
3681     ValueDFS VD;
3682     DomTreeNode *DomNode = DT->getNode(BB);
3683     VD.DFSIn = DomNode->getDFSNumIn();
3684     VD.DFSOut = DomNode->getDFSNumOut();
3685     VD.Def.setPointer(D);
3686 
3687     // If it's an instruction, use the real local dfs number.
3688     if (auto *I = dyn_cast<Instruction>(D))
3689       VD.LocalNum = InstrToDFSNum(I);
3690     else
3691       llvm_unreachable("Should have been an instruction");
3692 
3693     LoadsAndStores.emplace_back(VD);
3694   }
3695 }
3696 
3697 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
3698   patchReplacementInstruction(I, Repl);
3699   I->replaceAllUsesWith(Repl);
3700 }
3701 
3702 void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) {
3703   LLVM_DEBUG(dbgs() << "  BasicBlock Dead:" << *BB);
3704   ++NumGVNBlocksDeleted;
3705 
3706   // Delete the instructions backwards, as it has a reduced likelihood of having
3707   // to update as many def-use and use-def chains. Start after the terminator.
3708   auto StartPoint = BB->rbegin();
3709   ++StartPoint;
3710   // Note that we explicitly recalculate BB->rend() on each iteration,
3711   // as it may change when we remove the first instruction.
3712   for (BasicBlock::reverse_iterator I(StartPoint); I != BB->rend();) {
3713     Instruction &Inst = *I++;
3714     if (!Inst.use_empty())
3715       Inst.replaceAllUsesWith(PoisonValue::get(Inst.getType()));
3716     if (isa<LandingPadInst>(Inst))
3717       continue;
3718     salvageKnowledge(&Inst, AC);
3719 
3720     Inst.eraseFromParent();
3721     ++NumGVNInstrDeleted;
3722   }
3723   // Now insert something that simplifycfg will turn into an unreachable.
3724   Type *Int8Ty = Type::getInt8Ty(BB->getContext());
3725   new StoreInst(
3726       PoisonValue::get(Int8Ty),
3727       Constant::getNullValue(PointerType::getUnqual(BB->getContext())),
3728       BB->getTerminator());
3729 }
3730 
3731 void NewGVN::markInstructionForDeletion(Instruction *I) {
3732   LLVM_DEBUG(dbgs() << "Marking " << *I << " for deletion\n");
3733   InstructionsToErase.insert(I);
3734 }
3735 
3736 void NewGVN::replaceInstruction(Instruction *I, Value *V) {
3737   LLVM_DEBUG(dbgs() << "Replacing " << *I << " with " << *V << "\n");
3738   patchAndReplaceAllUsesWith(I, V);
3739   // We save the actual erasing to avoid invalidating memory
3740   // dependencies until we are done with everything.
3741   markInstructionForDeletion(I);
3742 }
3743 
3744 namespace {
3745 
3746 // This is a stack that contains both the value and the DFS info of where
3747 // that value is valid.
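     //
     // A minimal usage sketch (illustrative only; the values and DFS numbers are
     // made up):
     //
     //   ValueDFSStack Stack;
     //   Stack.push_back(V1, /*DFSIn=*/1, /*DFSOut=*/10); // valid in subtree [1,10]
     //   Stack.push_back(V2, /*DFSIn=*/2, /*DFSOut=*/5);  // valid in subtree [2,5]
     //   Stack.popUntilDFSScope(6, 7); // pops V2: (6,7) is not inside [2,5]
     //   Value *Leader = Stack.back(); // V1, whose scope [1,10] contains (6,7)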
3748 class ValueDFSStack {
3749 public:
3750   Value *back() const { return ValueStack.back(); }
3751   std::pair<int, int> dfs_back() const { return DFSStack.back(); }
3752 
3753   void push_back(Value *V, int DFSIn, int DFSOut) {
3754     ValueStack.emplace_back(V);
3755     DFSStack.emplace_back(DFSIn, DFSOut);
3756   }
3757 
3758   bool empty() const { return DFSStack.empty(); }
3759 
3760   bool isInScope(int DFSIn, int DFSOut) const {
3761     if (empty())
3762       return false;
3763     return DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second;
3764   }
3765 
3766   void popUntilDFSScope(int DFSIn, int DFSOut) {
3767 
3768     // These two should always be in sync at this point.
3769     assert(ValueStack.size() == DFSStack.size() &&
3770            "Mismatch between ValueStack and DFSStack");
3771     while (
3772         !DFSStack.empty() &&
3773         !(DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second)) {
3774       DFSStack.pop_back();
3775       ValueStack.pop_back();
3776     }
3777   }
3778 
3779 private:
3780   SmallVector<Value *, 8> ValueStack;
3781   SmallVector<std::pair<int, int>, 8> DFSStack;
3782 };
3783 
3784 } // end anonymous namespace
3785 
3786 // Given an expression, get the congruence class for it.
3787 CongruenceClass *NewGVN::getClassForExpression(const Expression *E) const {
3788   if (auto *VE = dyn_cast<VariableExpression>(E))
3789     return ValueToClass.lookup(VE->getVariableValue());
3790   else if (isa<DeadExpression>(E))
3791     return TOPClass;
3792   return ExpressionToClass.lookup(E);
3793 }
3794 
3795 // Given an expression and a basic block we are trying to see if it is
3796 // available in, see if the expression has a leader available in that block.
3797 Value *NewGVN::findPHIOfOpsLeader(const Expression *E,
3798                                   const Instruction *OrigInst,
3799                                   const BasicBlock *BB) const {
3800   // It would already be constant if we could make it constant
3801   if (auto *CE = dyn_cast<ConstantExpression>(E))
3802     return CE->getConstantValue();
3803   if (auto *VE = dyn_cast<VariableExpression>(E)) {
3804     auto *V = VE->getVariableValue();
3805     if (alwaysAvailable(V) || DT->dominates(getBlockForValue(V), BB))
3806       return VE->getVariableValue();
3807   }
3808 
3809   auto *CC = getClassForExpression(E);
3810   if (!CC)
3811     return nullptr;
3812   if (alwaysAvailable(CC->getLeader()))
3813     return CC->getLeader();
3814 
3815   for (auto *Member : *CC) {
3816     auto *MemberInst = dyn_cast<Instruction>(Member);
3817     if (MemberInst == OrigInst)
3818       continue;
3819     // Anything that isn't an instruction is always available.
3820     if (!MemberInst)
3821       return Member;
3822     if (DT->dominates(getBlockForValue(MemberInst), BB))
3823       return Member;
3824   }
3825   return nullptr;
3826 }
3827 
3828 bool NewGVN::eliminateInstructions(Function &F) {
3829   // This is a non-standard eliminator. The normal way to eliminate is
3830   // to walk the dominator tree in order, keeping track of available
3831   // values, and eliminating them.  However, this is mildly
3832   // pointless. It requires doing lookups on every instruction,
3833   // regardless of whether we will ever eliminate it.  For
3834   // regardless of whether we will ever eliminate it.  For instructions in
3835   // singleton congruence classes (which is most of them), we know we will
3836   // never eliminate them.
3837   // Instead, this eliminator looks at the congruence classes directly, sorts
3838   // them into a DFS ordering of the dominator tree, and then we just
3839   // perform elimination straight on the sets by walking the congruence
3840   // class member uses in order, and eliminate the ones dominated by the
3841   // last member.   This is worst case O(E log E) where E = number of
3842   // instructions in a single congruence class.  In theory, this is all
3843   // instructions.   In practice, it is much faster, as most instructions are
3844   // either in singleton congruence classes or can't possibly be eliminated
3845   // anyway (if there are no overlapping DFS ranges in the class).
3846   // When we find something not dominated, it becomes the new leader
3847   // for elimination purposes.
3848   // TODO: If we wanted to be faster, we could remove any members with no
3849   // overlapping ranges while sorting, as we will never eliminate anything
3850   // with those members, as they don't dominate anything else in our set.
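       //
       // A small illustrative example (hypothetical members and DFS intervals):
       // suppose a congruence class contains %a, %b and %c, whose defining
       // blocks have dominator-tree DFS intervals [1,10], [2,5] and [6,9].
       // After sorting, %a comes first and becomes the leader on the
       // elimination stack.  %b and %c both fall inside %a's interval, so uses
       // of %b and %c that %a dominates are rewritten to use %a, and the
       // dominated defs are marked dead.  If %c instead had interval [11,12],
       // it would be outside %a's scope, the stack would be popped, and %c
       // would become the new leader for any later members it dominates.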
3851 
3852   bool AnythingReplaced = false;
3853 
3854   // Since we are going to walk the domtree anyway, and we can't guarantee the
3855   // DFS numbers are updated, we compute some ourselves.
3856   DT->updateDFSNumbers();
3857 
3858   // Go through all of our phi nodes, and kill the arguments associated with
3859   // unreachable edges.
3860   auto ReplaceUnreachablePHIArgs = [&](PHINode *PHI, BasicBlock *BB) {
3861     for (auto &Operand : PHI->incoming_values())
3862       if (!ReachableEdges.count({PHI->getIncomingBlock(Operand), BB})) {
3863         LLVM_DEBUG(dbgs() << "Replacing incoming value of " << PHI
3864                           << " for block "
3865                           << getBlockName(PHI->getIncomingBlock(Operand))
3866                           << " with poison due to it being unreachable\n");
3867         Operand.set(PoisonValue::get(PHI->getType()));
3868       }
3869   };
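       // For example (illustrative): if the edge %pred2 -> BB was found to be
       // unreachable, a phi such as
       //   %p = phi i32 [ %v, %pred1 ], [ %w, %pred2 ]
       // has its %pred2 incoming value rewritten to poison:
       //   %p = phi i32 [ %v, %pred1 ], [ poison, %pred2 ]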
3870   // Replace unreachable phi arguments.
3871   // At this point, RevisitOnReachabilityChange only contains:
3872   //
3873   // 1. PHIs
3874   // 2. Temporaries that will convert to PHIs
3875   // 3. Operations that are affected by an unreachable edge but do not fit into
3876   // 1 or 2 (rare).
3877   // So it is a slight overshoot of what we want. We could make it exact by
3878   // using two SparseBitVectors per block.
3879   DenseMap<const BasicBlock *, unsigned> ReachablePredCount;
3880   for (auto &KV : ReachableEdges)
3881     ReachablePredCount[KV.getEnd()]++;
3882   for (auto &BBPair : RevisitOnReachabilityChange) {
3883     for (auto InstNum : BBPair.second) {
3884       auto *Inst = InstrFromDFSNum(InstNum);
3885       auto *PHI = dyn_cast<PHINode>(Inst);
3886       PHI = PHI ? PHI : dyn_cast_or_null<PHINode>(RealToTemp.lookup(Inst));
3887       if (!PHI)
3888         continue;
3889       auto *BB = BBPair.first;
3890       if (ReachablePredCount.lookup(BB) != PHI->getNumIncomingValues())
3891         ReplaceUnreachablePHIArgs(PHI, BB);
3892     }
3893   }
3894 
3895   // Map to store the use counts
3896   DenseMap<const Value *, unsigned int> UseCounts;
3897   for (auto *CC : reverse(CongruenceClasses)) {
3898     LLVM_DEBUG(dbgs() << "Eliminating in congruence class " << CC->getID()
3899                       << "\n");
3900     // Track the equivalent store info so we can decide whether to try
3901     // dead store elimination.
3902     SmallVector<ValueDFS, 8> PossibleDeadStores;
3903     SmallPtrSet<Instruction *, 8> ProbablyDead;
3904     if (CC->isDead() || CC->empty())
3905       continue;
3906     // Everything still in the TOP class is unreachable or dead.
3907     if (CC == TOPClass) {
3908       for (auto *M : *CC) {
3909         auto *VTE = ValueToExpression.lookup(M);
3910         if (VTE && isa<DeadExpression>(VTE))
3911           markInstructionForDeletion(cast<Instruction>(M));
3912         assert((!ReachableBlocks.count(cast<Instruction>(M)->getParent()) ||
3913                 InstructionsToErase.count(cast<Instruction>(M))) &&
3914                "Everything in TOP should be unreachable or dead at this "
3915                "point");
3916       }
3917       continue;
3918     }
3919 
3920     assert(CC->getLeader() && "We should have had a leader");
3921     // If this is a leader that is always available, and it's a
3922     // constant or has no equivalences, just replace everything with
3923     // it. We then update the congruence class with whatever members
3924     // are left.
3925     Value *Leader =
3926         CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader();
3927     if (alwaysAvailable(Leader)) {
3928       CongruenceClass::MemberSet MembersLeft;
3929       for (auto *M : *CC) {
3930         Value *Member = M;
3931         // Void things have no uses we can replace.
3932         if (Member == Leader || !isa<Instruction>(Member) ||
3933             Member->getType()->isVoidTy()) {
3934           MembersLeft.insert(Member);
3935           continue;
3936         }
3937         LLVM_DEBUG(dbgs() << "Found replacement " << *(Leader) << " for "
3938                           << *Member << "\n");
3939         auto *I = cast<Instruction>(Member);
3940         assert(Leader != I && "About to accidentally remove our leader");
3941         replaceInstruction(I, Leader);
3942         AnythingReplaced = true;
3943       }
3944       CC->swap(MembersLeft);
3945     } else {
3946       // If this is a singleton, we can skip it.
3947       if (CC->size() != 1 || RealToTemp.count(Leader)) {
3948         // This is a stack because equality replacement/etc may place
3949         // constants in the middle of the member list, and we want to use
3950         // those constant values in preference to the current leader, over
3951         // the scope of those constants.
3952         ValueDFSStack EliminationStack;
3953 
3954         // Convert the members to DFS ordered sets and then merge them.
3955         SmallVector<ValueDFS, 8> DFSOrderedSet;
3956         convertClassToDFSOrdered(*CC, DFSOrderedSet, UseCounts, ProbablyDead);
3957 
3958         // Sort the whole thing.
3959         llvm::sort(DFSOrderedSet);
3960         for (auto &VD : DFSOrderedSet) {
3961           int MemberDFSIn = VD.DFSIn;
3962           int MemberDFSOut = VD.DFSOut;
3963           Value *Def = VD.Def.getPointer();
3964           bool FromStore = VD.Def.getInt();
3965           Use *U = VD.U;
3966           // We ignore void things because we can't get a value from them.
3967           if (Def && Def->getType()->isVoidTy())
3968             continue;
3969           auto *DefInst = dyn_cast_or_null<Instruction>(Def);
3970           if (DefInst && AllTempInstructions.count(DefInst)) {
3971             auto *PN = cast<PHINode>(DefInst);
3972 
3973             // If this is a value phi and that's the expression we used, insert
3974             // it into the program and remove it from the temporary
3975             // instruction list.
3976             AllTempInstructions.erase(PN);
3977             auto *DefBlock = getBlockForValue(Def);
3978             LLVM_DEBUG(dbgs() << "Inserting fully real phi of ops" << *Def
3979                               << " into block "
3980                               << getBlockName(getBlockForValue(Def)) << "\n");
3981             PN->insertBefore(&DefBlock->front());
3982             Def = PN;
3983             NumGVNPHIOfOpsEliminations++;
3984           }
3985 
3986           if (EliminationStack.empty()) {
3987             LLVM_DEBUG(dbgs() << "Elimination Stack is empty\n");
3988           } else {
3989             LLVM_DEBUG(dbgs() << "Elimination Stack Top DFS numbers are ("
3990                               << EliminationStack.dfs_back().first << ","
3991                               << EliminationStack.dfs_back().second << ")\n");
3992           }
3993 
3994           LLVM_DEBUG(dbgs() << "Current DFS numbers are (" << MemberDFSIn << ","
3995                             << MemberDFSOut << ")\n");
3996           // First, we see if we are out of scope or empty.  If so,
3997           // and there are equivalences, we try to replace the top of the
3998           // stack with equivalences (if it's on the stack, it must
3999           // not have been eliminated yet).
4000           // Then we synchronize to our current scope, by
4001           // popping until we are back within a DFS scope that
4002           // dominates the current member.
4003           // Then, what happens depends on a few factors:
4004           // If the stack is now empty, we need to push.
4005           // If we have a constant or a local equivalence we want to
4006           // start using, we also push.
4007           // Otherwise, we walk along, processing members who are
4008           // dominated by this scope, and eliminate them.
4009           bool ShouldPush = Def && EliminationStack.empty();
4010           bool OutOfScope =
4011               !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut);
4012 
4013           if (OutOfScope || ShouldPush) {
4014             // Sync to our current scope.
4015             EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut);
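                 // Re-check after syncing: if the stack is now empty and this
                 // is a def, it becomes the leader for the new scope.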
4016             bool ShouldPush = Def && EliminationStack.empty();
4017             if (ShouldPush) {
4018               EliminationStack.push_back(Def, MemberDFSIn, MemberDFSOut);
4019             }
4020           }
4021 
4022           // Skip the Defs; we only want to eliminate on their uses.  But mark
4023           // dominated defs as dead.
4024           if (Def) {
4025             // For anything in this case, the way we value number guarantees
4026             // that any side-effects that would have occurred (i.e. throwing,
4027             // etc.) can be proven to either still occur (because it's
4028             // dominated by something that has the same side-effects), or never
4029             // occur.  Otherwise, we would not have been able to prove it value
4030             // equivalent to something else. For these things, we can just mark
4031             // it all dead.  Note that this is different from the "ProbablyDead"
4032             // set, which may not be dominated by anything, and thus, are only
4033             // easy to prove dead if they are also side-effect free. Note that
4034             // because stores are put in terms of the stored value, we skip
4035             // stored values here. If the stored value is really dead, it will
4036             // still be marked for deletion when we process it in its own class.
4037             if (!EliminationStack.empty() && Def != EliminationStack.back() &&
4038                 isa<Instruction>(Def) && !FromStore)
4039               markInstructionForDeletion(cast<Instruction>(Def));
4040             continue;
4041           }
4042           // At this point, we know it is a Use we are trying to possibly
4043           // replace.
4044 
4045           assert(isa<Instruction>(U->get()) &&
4046                  "Current def should have been an instruction");
4047           assert(isa<Instruction>(U->getUser()) &&
4048                  "Current user should have been an instruction");
4049 
4050           // If the thing we are replacing into is already marked to be dead,
4051           // this use is dead.  Note that this is true regardless of whether
4052           // we have anything dominating the use or not.  We do this here
4053           // because we are already walking all the uses anyway.
4054           Instruction *InstUse = cast<Instruction>(U->getUser());
4055           if (InstructionsToErase.count(InstUse)) {
4056             auto &UseCount = UseCounts[U->get()];
4057             if (--UseCount == 0) {
4058               ProbablyDead.insert(cast<Instruction>(U->get()));
4059             }
4060           }
4061 
4062           // If we get to this point and the stack is empty, we must have a use
4063           // with nothing we can use to eliminate it, so just skip it.
4064           if (EliminationStack.empty())
4065             continue;
4066 
4067           Value *DominatingLeader = EliminationStack.back();
4068 
4069           auto *II = dyn_cast<IntrinsicInst>(DominatingLeader);
4070           bool isSSACopy = II && II->getIntrinsicID() == Intrinsic::ssa_copy;
4071           if (isSSACopy)
4072             DominatingLeader = II->getOperand(0);
4073 
4074           // Don't replace our existing users with ourselves.
4075           if (U->get() == DominatingLeader)
4076             continue;
4077           LLVM_DEBUG(dbgs()
4078                      << "Found replacement " << *DominatingLeader << " for "
4079                      << *U->get() << " in " << *(U->getUser()) << "\n");
4080 
4081           // If we replaced something in an instruction, handle the patching of
4082           // metadata.  Skip this if we are replacing predicateinfo with its
4083           // original operand, as we already know we can just drop it.
4084           auto *ReplacedInst = cast<Instruction>(U->get());
4085           auto *PI = PredInfo->getPredicateInfoFor(ReplacedInst);
4086           if (!PI || DominatingLeader != PI->OriginalOp)
4087             patchReplacementInstruction(ReplacedInst, DominatingLeader);
4088           U->set(DominatingLeader);
4089           // This is now a use of the dominating leader, which means if the
4090           // dominating leader was dead, it's now live!
4091           auto &LeaderUseCount = UseCounts[DominatingLeader];
4092           // It's about to be alive again.
4093           if (LeaderUseCount == 0 && isa<Instruction>(DominatingLeader))
4094             ProbablyDead.erase(cast<Instruction>(DominatingLeader));
4095           // For copy instructions, we use their operand as a leader,
4096           // which means we remove a user of the copy and it may become dead.
4097           if (isSSACopy) {
4098             unsigned &IIUseCount = UseCounts[II];
4099             if (--IIUseCount == 0)
4100               ProbablyDead.insert(II);
4101           }
4102           ++LeaderUseCount;
4103           AnythingReplaced = true;
4104         }
4105       }
4106     }
4107 
4108     // At this point, anything still in the ProbablyDead set is actually dead
4109     // if it would be trivially dead.
4110     for (auto *I : ProbablyDead)
4111       if (wouldInstructionBeTriviallyDead(I))
4112         markInstructionForDeletion(I);
4113 
4114     // Cleanup the congruence class.
4115     CongruenceClass::MemberSet MembersLeft;
4116     for (auto *Member : *CC)
4117       if (!isa<Instruction>(Member) ||
4118           !InstructionsToErase.count(cast<Instruction>(Member)))
4119         MembersLeft.insert(Member);
4120     CC->swap(MembersLeft);
4121 
4122     // If we have possible dead stores to look at, try to eliminate them.
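         // For example (illustrative): if this class contains two stores that
         // write the same value-numbered value to the same location, and one is
         // dominated by the other, the dominated store is redundant (memory
         // already holds that value) and is removed below.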
4123     if (CC->getStoreCount() > 0) {
4124       convertClassToLoadsAndStores(*CC, PossibleDeadStores);
4125       llvm::sort(PossibleDeadStores);
4126       ValueDFSStack EliminationStack;
4127       for (auto &VD : PossibleDeadStores) {
4128         int MemberDFSIn = VD.DFSIn;
4129         int MemberDFSOut = VD.DFSOut;
4130         Instruction *Member = cast<Instruction>(VD.Def.getPointer());
4131         if (EliminationStack.empty() ||
4132             !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut)) {
4133           // Sync to our current scope.
4134           EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut);
4135           if (EliminationStack.empty()) {
4136             EliminationStack.push_back(Member, MemberDFSIn, MemberDFSOut);
4137             continue;
4138           }
4139         }
4140         // We already did load elimination, so nothing to do here.
4141         if (isa<LoadInst>(Member))
4142           continue;
4143         assert(!EliminationStack.empty());
4144         Instruction *Leader = cast<Instruction>(EliminationStack.back());
4145         (void)Leader;
4146         assert(DT->dominates(Leader->getParent(), Member->getParent()));
4147         // Member is dominated by Leader, and thus dead.
4148         LLVM_DEBUG(dbgs() << "Marking dead store " << *Member
4149                           << " that is dominated by " << *Leader << "\n");
4150         markInstructionForDeletion(Member);
4151         CC->erase(Member);
4152         ++NumGVNDeadStores;
4153       }
4154     }
4155   }
4156   return AnythingReplaced;
4157 }
4158 
4159 // This function provides global ranking of operations so that we can place them
4160 // in a canonical order.  Note that rank alone is not necessarily enough for a
4161 // complete ordering, as constants all have the same rank.  However, generally,
4162 // we will simplify an operation with all constants so that it doesn't matter
4163 // what order they appear in.
4164 unsigned int NewGVN::getRank(const Value *V) const {
4165   // Prefer constants to undef to anything else
4166   // Undef is a constant, have to check it first.
4167   // Prefer poison to undef as it's less defined.
4168   // Prefer smaller constants to constantexprs
4169   // Note that the order here matters because of class inheritance
4170   if (isa<ConstantExpr>(V))
4171     return 3;
4172   if (isa<PoisonValue>(V))
4173     return 1;
4174   if (isa<UndefValue>(V))
4175     return 2;
4176   if (isa<Constant>(V))
4177     return 0;
4178   if (auto *A = dyn_cast<Argument>(V))
4179     return 4 + A->getArgNo();
4180 
4181   // Need to shift the instruction DFS by number of arguments + 5 to account for
4182   // the constant and argument ranking above.
4183   unsigned Result = InstrToDFSNum(V);
4184   if (Result > 0)
4185     return 5 + NumFuncArgs + Result;
4186   // Unreachable or something else, just return a really large number.
4187   return ~0;
4188 }
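
     // Worked example (illustrative; the DFS number is made up): in a function
     // with two arguments %x and %y, and an instruction %i whose DFS number is
     // 7, the ranks are
     //   plain constant (e.g. i32 0) -> 0
     //   poison                      -> 1
     //   undef                       -> 2
     //   constantexpr                -> 3
     //   %x -> 4, %y -> 5            (4 + argument number)
     //   %i -> 5 + 2 + 7 = 14        (5 + NumFuncArgs + DFS number)
     // so constants rank before arguments, which rank before instructions.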
4189 
4190 // This is a function that says whether two commutative operations should
4191 // This function says whether two operands of a commutative operation should
4192 // have their order swapped when canonicalizing.
4193   // Because we only care about a total ordering, and don't rewrite expressions
4194   // in this order, we order by rank, which will give a strict weak ordering to
4195   // everything but constants, and then we order by pointer address.
4196   return std::make_pair(getRank(A), A) > std::make_pair(getRank(B), B);
4197 }
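
     // For example (illustrative): when building the commutative expression for
     // 'add %a, 1', the constant 1 (rank 0) orders before the instruction %a, so
     // both 'add %a, 1' and 'add 1, %a' end up with the same canonical operand
     // order and hence the same value number; the IR itself is not rewritten.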
4198 
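     // A wrapper around shouldSwapOperands that additionally remembers its
     // decision per intrinsic: when a swap is requested, the preferred operand B
     // is recorded; on later queries where the ranking alone would not swap, we
     // still return true if B matches the recorded operand (keeping the chosen
     // order stable), and otherwise clear the record.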
4199 bool NewGVN::shouldSwapOperandsForIntrinsic(const Value *A, const Value *B,
4200                                             const IntrinsicInst *I) const {
4201   auto LookupResult = IntrinsicInstPred.find(I);
4202   if (shouldSwapOperands(A, B)) {
4203     if (LookupResult == IntrinsicInstPred.end())
4204       IntrinsicInstPred.insert({I, B});
4205     else
4206       LookupResult->second = B;
4207     return true;
4208   }
4209 
4210   if (LookupResult != IntrinsicInstPred.end()) {
4211     auto *SeenPredicate = LookupResult->second;
4212     if (SeenPredicate) {
4213       if (SeenPredicate == B)
4214         return true;
4215       else
4216         LookupResult->second = nullptr;
4217     }
4218   }
4219   return false;
4220 }
4221 
4222 PreservedAnalyses NewGVNPass::run(Function &F, AnalysisManager<Function> &AM) {
4223   // Apparently the order in which we get these results matters for
4224   // the old GVN (see Chandler's comment in GVN.cpp). I'll keep
4225   // the same order here, just in case.
4226   auto &AC = AM.getResult<AssumptionAnalysis>(F);
4227   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
4228   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
4229   auto &AA = AM.getResult<AAManager>(F);
4230   auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
4231   bool Changed =
4232       NewGVN(F, &DT, &AC, &TLI, &AA, &MSSA, F.getParent()->getDataLayout())
4233           .runGVN();
4234   if (!Changed)
4235     return PreservedAnalyses::all();
4236   PreservedAnalyses PA;
4237   PA.preserve<DominatorTreeAnalysis>();
4238   return PA;
4239 }
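
     // Note (illustrative, assuming the usual "newgvn" registration name): with
     // the new pass manager this pass is typically run as, e.g.,
     //   opt -passes=newgvn input.ll -S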
4240