//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace llvm::gvn;
using namespace llvm::VNCoercion;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNLoad, "Number of loads deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad, "Number of loads PRE'd");
STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");

STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
          "Number of blocks speculated as available in "
          "IsValueFullyAvailableInBlock(), max");
STATISTIC(MaxBBSpeculationCutoffReachedTimes,
          "Number of times we reached the gvn-max-block-speculations cut-off "
          "preventing further exploration");

static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
                                            cl::init(true));
static cl::opt<bool>
GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
                                cl::init(true));
static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));

static cl::opt<uint32_t> MaxNumDeps(
    "gvn-max-num-deps", cl::Hidden, cl::init(100), cl::ZeroOrMore,
    cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));

// This is based on the IsValueFullyAvailableInBlockNumSpeculationsMax stat.
static cl::opt<uint32_t> MaxBBSpeculations(
    "gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::ZeroOrMore,
    cl::desc("Max number of blocks we're willing to speculate on (and recurse "
             "into) when deducing if a value is fully available or not in GVN "
             "(default = 600)"));

struct llvm::GVNPass::Expression {
  uint32_t opcode;
  bool commutative = false;
  Type *type = nullptr;
  SmallVector<uint32_t, 4> varargs;

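  // Opcodes ~0U and ~1U are reserved for the DenseMap empty and tombstone
  // keys (see DenseMapInfo<GVNPass::Expression> below), so the default
  // opcode starts at ~2U.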
  Expression(uint32_t o = ~2U) : opcode(o) {}

  bool operator==(const Expression &other) const {
    if (opcode != other.opcode)
      return false;
    if (opcode == ~0U || opcode == ~1U)
      return true;
    if (type != other.type)
      return false;
    if (varargs != other.varargs)
      return false;
    return true;
  }

  friend hash_code hash_value(const Expression &Value) {
    return hash_combine(
        Value.opcode, Value.type,
        hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
  }
};

namespace llvm {

template <> struct DenseMapInfo<GVNPass::Expression> {
  static inline GVNPass::Expression getEmptyKey() { return ~0U; }
  static inline GVNPass::Expression getTombstoneKey() { return ~1U; }

  static unsigned getHashValue(const GVNPass::Expression &e) {
    using llvm::hash_value;

    return static_cast<unsigned>(hash_value(e));
  }

  static bool isEqual(const GVNPass::Expression &LHS,
                      const GVNPass::Expression &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// Represents a particular available value that we know how to materialize.
/// Materialization of an AvailableValue never fails.  An AvailableValue is
/// implicitly associated with a rematerialization point, which is the
/// location of the instruction from which it was formed.
struct llvm::gvn::AvailableValue {
  enum ValType {
    SimpleVal, // A simple offsetted value that is accessed.
    LoadVal,   // A value produced by a load.
    MemIntrin, // A memory intrinsic which is loaded from.
    UndefVal   // An UndefValue representing a value from a dead block (which
               // is not yet physically removed from the CFG).
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 2, ValType> Val;
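  // The ValType tag is packed into the two low bits of the pointer, so the
  // kind of available value costs no extra storage.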

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset = 0;

  static AvailableValue get(Value *V, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getLoad(LoadInst *Load, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(Load);
    Res.Val.setInt(LoadVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getUndef() {
    AvailableValue Res;
    Res.Val.setPointer(nullptr);
    Res.Val.setInt(UndefVal);
    Res.Offset = 0;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
  bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
  bool isUndefValue() const { return Val.getInt() == UndefVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val.getPointer());
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// Emit code at the specified insertion point to adjust the value defined
  /// here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt,
                                  GVNPass &gvn) const;
};

/// Represents an AvailableValue which can be rematerialized at the end of
/// the associated BasicBlock.
struct llvm::gvn::AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB = nullptr;

  /// AV - The actual available value.
  AvailableValue AV;

  static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.AV = std::move(AV);
    return Res;
  }

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    return get(BB, AvailableValue::get(V, Offset));
  }

  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    return get(BB, AvailableValue::getUndef());
  }

  /// Emit code at the end of this block to adjust the value defined here to
  /// the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const {
    return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn);
  }
};

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

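/// Build an Expression for the given instruction by value-numbering each of
/// its operands. Commutative operations and comparisons are canonicalized
/// below so that equivalent instructions produce identical expressions.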
GVNPass::Expression GVNPass::ValueTable::createExpr(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
    // gc.relocate is a 'special' call: its second and third operands are
    // not real values, but indices into the statepoint's argument list.
    // Use the referred-to values for purposes of identity.
    e.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
    e.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
    e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
  } else {
    for (Use &Op : I->operands())
      e.varargs.push_back(lookupOrAdd(Op));
  }
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers.  Since the commutative operands are the first two operands, it
    // is more efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
    e.commutative = true;
  }

  if (auto *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
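    // Pack the compare opcode and the canonicalized predicate into a single
    // expression opcode, so compares with different predicates never collide
    // with each other or with plain instruction opcodes.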
    e.opcode = (C->getOpcode() << 8) | Predicate;
    e.commutative = true;
  } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
    e.varargs.append(E->idx_begin(), E->idx_end());
  } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
    ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
    e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
  }

  return e;
}

GVNPass::Expression GVNPass::ValueTable::createCmpExpr(
    unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookupOrAdd(LHS));
  e.varargs.push_back(lookupOrAdd(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  e.commutative = true;
  return e;
}

GVNPass::Expression
GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
  if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI is an extract from one of our with.overflow intrinsics. Synthesize
    // a semantically equivalent expression instead of an extract value
    // expression.
    e.opcode = WO->getBinaryOp();
    e.varargs.push_back(lookupOrAdd(WO->getLHS()));
    e.varargs.push_back(lookupOrAdd(WO->getRHS()));
    return e;
  }

  // Not a recognized intrinsic. Fall back to producing an extract value
  // expression.
  e.opcode = EI->getOpcode();
  for (Use &Op : EI->operands())
    e.varargs.push_back(lookupOrAdd(Op));

  append_range(e.varargs, EI->indices());

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

GVNPass::ValueTable::ValueTable() = default;
GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
GVNPass::ValueTable::~ValueTable() = default;
GVNPass::ValueTable &
GVNPass::ValueTable::operator=(const GVNPass::ValueTable &Arg) = default;

/// add - Insert a value into the table with a specified value number.
void GVNPass::ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
  if (PHINode *PN = dyn_cast<PHINode>(V))
    NumberingPhi[num] = PN;
}

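/// Value-number a call. Calls that do not access memory are numbered like
/// ordinary expressions; calls that only read memory may additionally reuse
/// the number of an identical dominating call found via MemDep; all other
/// calls always receive a fresh value number.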
uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = createExpr(C);
    uint32_t e = assignExpNewValueNum(exp).first;
    valueNumbering[C] = e;
    return e;
  } else if (MD && AA->onlyReadsMemory(C)) {
    Expression exp = createExpr(C);
    auto ValNum = assignExpNewValueNum(exp);
    if (ValNum.second) {
      valueNumbering[C] = ValNum.first;
      return ValNum.first;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      // For masked load/store intrinsics, the local_dep may actually be
      // a normal load or store instruction.
      CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());

      if (!local_cdep || local_cdep->arg_size() != C->arg_size()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
        uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
        uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookupOrAdd(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceResults::NonLocalDepInfo &deps =
        MD->getNonLocalCallDependency(C);
    // FIXME: Move the checking logic to MemDep!
    CallInst* cdep = nullptr;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-definitions.  If we already have a call, reject
      // instruction dependencies.
      if (!I->getResult().isDef() || cdep != nullptr) {
        cdep = nullptr;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with the local case above.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = nullptr;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->arg_size() != C->arg_size()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
      uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
      uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookupOrAdd(cdep);
    valueNumbering[C] = v;
    return v;
  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// Returns true if a value number exists for the specified value.
bool GVNPass::ValueTable::exists(Value *V) const {
  return valueNumbering.count(V) != 0;
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVNPass::ValueTable::lookupOrAdd(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
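  // Only the instruction kinds listed below participate in expression-based
  // numbering; any other opcode conservatively gets a fresh value number.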
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookupOrAddCall(cast<CallInst>(I));
    case Instruction::FNeg:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::AddrSpaceCast:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::Freeze:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = createExpr(I);
      break;
    case Instruction::ExtractValue:
      exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
      break;
    case Instruction::PHI:
      valueNumbering[V] = nextValueNumber;
      NumberingPhi[nextValueNumber] = cast<PHINode>(V);
      return nextValueNumber++;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t e = assignExpNewValueNum(exp).first;
  valueNumbering[V] = e;
  return e;
}

/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t GVNPass::ValueTable::lookup(Value *V, bool Verify) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  if (Verify) {
    assert(VI != valueNumbering.end() && "Value not numbered?");
    return VI->second;
  }
  return (VI != valueNumbering.end()) ? VI->second : 0;
}

/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before.  Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                             CmpInst::Predicate Predicate,
                                             Value *LHS, Value *RHS) {
  Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  return assignExpNewValueNum(exp).first;
}

/// Remove all entries from the ValueTable.
void GVNPass::ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  NumberingPhi.clear();
  PhiTranslateTable.clear();
  nextValueNumber = 1;
  Expressions.clear();
  ExprIdx.clear();
  nextExprNumber = 0;
}

/// Remove a value from the value numbering.
void GVNPass::ValueTable::erase(Value *V) {
  uint32_t Num = valueNumbering.lookup(V);
  valueNumbering.erase(V);
  // If V is a PHINode, the V <--> value number mapping is one-to-one.
  if (isa<PHINode>(V))
    NumberingPhi.erase(Num);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVNPass::ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

bool GVNPass::isPREEnabled() const {
  return Options.AllowPRE.getValueOr(GVNEnablePRE);
}

bool GVNPass::isLoadPREEnabled() const {
  return Options.AllowLoadPRE.getValueOr(GVNEnableLoadPRE);
}

bool GVNPass::isLoadInLoopPREEnabled() const {
  return Options.AllowLoadInLoopPRE.getValueOr(GVNEnableLoadInLoopPRE);
}

bool GVNPass::isLoadPRESplitBackedgeEnabled() const {
  return Options.AllowLoadPRESplitBackedge.getValueOr(
      GVNEnableSplitBackedgeInLoadPRE);
}

bool GVNPass::isMemDepEnabled() const {
  return Options.AllowMemDep.getValueOr(GVNEnableMemDep);
}

PreservedAnalyses GVNPass::run(Function &F, FunctionAnalysisManager &AM) {
  // FIXME: The order of evaluation of these 'getResult' calls is very
  // significant! Re-ordering these variables will cause GVN when run alone to
  // be less effective! We should fix memdep and basic-aa to not exhibit this
  // behavior, but until then don't change the order here.
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto *MemDep =
      isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
  auto *LI = AM.getCachedResult<LoopAnalysis>(F);
  auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
                         MSSA ? &MSSA->getMSSA() : nullptr);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  if (MSSA)
    PA.preserve<MemorySSAAnalysis>();
  if (LI)
    PA.preserve<LoopAnalysis>();
  return PA;
}

void GVNPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<GVNPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << "<";
  if (Options.AllowPRE != None)
    OS << (Options.AllowPRE.getValue() ? "" : "no-") << "pre;";
  if (Options.AllowLoadPRE != None)
    OS << (Options.AllowLoadPRE.getValue() ? "" : "no-") << "load-pre;";
  if (Options.AllowLoadPRESplitBackedge != None)
    OS << (Options.AllowLoadPRESplitBackedge.getValue() ? "" : "no-")
       << "split-backedge-load-pre;";
  if (Options.AllowMemDep != None)
    OS << (Options.AllowMemDep.getValue() ? "" : "no-") << "memdep";
  OS << ">";
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void GVNPass::dump(DenseMap<uint32_t, Value *> &d) const {
  errs() << "{\n";
  for (auto &I : d) {
    errs() << I.first << "\n";
    I.second->dump();
  }
  errs() << "}\n";
}
#endif

enum class AvailabilityState : char {
  /// We know the block *is not* fully available. This is a fixpoint.
  Unavailable = 0,
  /// We know the block *is* fully available. This is a fixpoint.
  Available = 1,
  /// We do not know whether the block is fully available or not,
  /// but we are currently speculating that it will be.
  /// If it would have turned out that the block was, in fact, not fully
  /// available, this would have been cleaned up into an Unavailable.
  SpeculativelyAvailable = 2,
};

/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block.  As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
/// map is actually a tri-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
static bool IsValueFullyAvailableInBlock(
    BasicBlock *BB,
    DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
  SmallVector<BasicBlock *, 32> Worklist;
  Optional<BasicBlock *> UnavailableBB;

  // The number of times we didn't find an entry for a block in the map and
  // optimistically inserted an entry marking the block as speculatively
  // available.
  unsigned NumNewNewSpeculativelyAvailableBBs = 0;

#ifndef NDEBUG
  SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
  SmallVector<BasicBlock *, 32> AvailableBBs;
#endif

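  // Phase 1: walk up through the predecessors depth-first, optimistically
  // marking each newly seen block as speculatively available, until we either
  // run out of budget or reach a block where the value is known unavailable.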
  Worklist.emplace_back(BB);
  while (!Worklist.empty()) {
    BasicBlock *CurrBB = Worklist.pop_back_val(); // LIFO - depth-first!
    // Optimistically assume that the block is Speculatively Available and check
    // to see if we already know about this block in one lookup.
    std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
        FullyAvailableBlocks.try_emplace(
            CurrBB, AvailabilityState::SpeculativelyAvailable);
    AvailabilityState &State = IV.first->second;

    // Did the entry already exist for this block?
    if (!IV.second) {
      if (State == AvailabilityState::Unavailable) {
        UnavailableBB = CurrBB;
        break; // Backpropagate unavailability info.
      }

#ifndef NDEBUG
      AvailableBBs.emplace_back(CurrBB);
#endif
      continue; // Don't recurse further, but continue processing worklist.
    }

    // No entry found for block.
    ++NumNewNewSpeculativelyAvailableBBs;
    bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;

    // If we have exhausted our budget, mark this block as unavailable.
    // Also, if this block has no predecessors, the value isn't live-in here.
    if (OutOfBudget || pred_empty(CurrBB)) {
      MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
      State = AvailabilityState::Unavailable;
      UnavailableBB = CurrBB;
      break; // Backpropagate unavailability info.
    }

    // Tentatively consider this block as speculatively available.
#ifndef NDEBUG
    NewSpeculativelyAvailableBBs.insert(CurrBB);
#endif
    // And further recurse into block's predecessors, in depth-first order!
    Worklist.append(pred_begin(CurrBB), pred_end(CurrBB));
  }

#if LLVM_ENABLE_STATS
  IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
      NumNewNewSpeculativelyAvailableBBs);
#endif

  // If the given block isn't marked as a fixpoint yet (the Unavailable and
  // Available states are fixpoints), mark it with the given fixpoint state
  // and enqueue its successors for further processing.
  auto MarkAsFixpointAndEnqueueSuccessors =
      [&](BasicBlock *BB, AvailabilityState FixpointState) {
        auto It = FullyAvailableBlocks.find(BB);
        if (It == FullyAvailableBlocks.end())
          return; // Never queried this block, leave as-is.
        switch (AvailabilityState &State = It->second) {
        case AvailabilityState::Unavailable:
        case AvailabilityState::Available:
          return; // Don't backpropagate further, continue processing worklist.
        case AvailabilityState::SpeculativelyAvailable: // Fix it!
          State = FixpointState;
#ifndef NDEBUG
          assert(NewSpeculativelyAvailableBBs.erase(BB) &&
                 "Found a speculatively available successor leftover?");
#endif
          // Queue successors for further processing.
          Worklist.append(succ_begin(BB), succ_end(BB));
          return;
        }
      };

  if (UnavailableBB) {
    // Okay, we have encountered an unavailable block.
    // Mark speculatively available blocks reachable from UnavailableBB as
    // unavailable as well. Paths are terminated when they reach blocks not in
    // FullyAvailableBlocks or they are not marked as speculatively available.
    Worklist.clear();
    Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB));
    while (!Worklist.empty())
      MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                         AvailabilityState::Unavailable);
  }

#ifndef NDEBUG
  Worklist.clear();
  for (BasicBlock *AvailableBB : AvailableBBs)
    Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB));
  while (!Worklist.empty())
    MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
                                       AvailabilityState::Available);

  assert(NewSpeculativelyAvailableBBs.empty() &&
         "Must have fixed all the new speculatively available blocks.");
#endif

  return !UnavailableBB;
}

/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate Load.  This returns the value
/// that should be used at Load's definition site.
static Value *
ConstructSSAForLoadSet(LoadInst *Load,
                       SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                       GVNPass &gvn) {
  // Check for the fully redundant, dominating load case.  In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               Load->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
           "Dead BB dominates this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(Load->getType(), Load->getName());

  for (const AvailableValueInBlock &AV : ValuesPerBlock) {
    BasicBlock *BB = AV.BB;

    if (AV.AV.isUndefValue())
      continue;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    // If the value is the load that we will be eliminating, and the block it's
    // available in is the block that the load is in, then don't add it, as
    // SSAUpdater will resolve the value to the relevant phi, which may let it
    // avoid phi construction entirely if there's actually only one value.
    if (BB == Load->getParent() &&
        ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
         (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent());
}

Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
                                                Instruction *InsertPt,
                                                GVNPass &gvn) const {
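  // Dispatch on the kind of available value. Each case may need to coerce
  // the value to the load's type (via the VNCoercion helpers) when the types
  // or offsets do not match exactly.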
  Value *Res;
  Type *LoadTy = Load->getType();
  const DataLayout &DL = Load->getModule()->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);

      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
                        << "  " << *getSimpleValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *CoercedLoad = getCoercedLoadValue();
    if (CoercedLoad->getType() == LoadTy && Offset == 0) {
      Res = CoercedLoad;
    } else {
      Res = getLoadValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt, DL);
      // We would like to use gvn.markInstructionForDeletion here, but we can't
      // because the load is already memoized into the leader map table that GVN
      // tracks.  It is potentially possible to remove the load from the table,
      // but then all of the operations based on it would need to be rehashed.
      // Just leave the dead load around.
      gvn.getMemDep().removeInstruction(CoercedLoad);
      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
                        << "  " << *getCoercedLoadValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
                                 InsertPt, DL);
    LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                      << "  " << *getMemIntrinValue() << '\n'
                      << *Res << '\n'
                      << "\n\n\n");
  } else {
    llvm_unreachable("Should not materialize value from dead block");
  }
  assert(Res && "failed to materialize?");
  return Res;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// Assuming To can be reached from both From and Between, does Between lie on
/// every path from From to To?
static bool liesBetween(const Instruction *From, Instruction *Between,
                        const Instruction *To, DominatorTree *DT) {
  if (From->getParent() == Between->getParent())
    return DT->dominates(From, Between);
  SmallSet<BasicBlock *, 1> Exclusion;
  Exclusion.insert(Between->getParent());
  return !isPotentiallyReachable(From, To, &Exclusion, DT);
}

/// Try to locate the three instructions involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
                                   DominatorTree *DT,
                                   OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  User *OtherAccess = nullptr;

  OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", Load);
  R << "load of type " << NV("Type", Load->getType()) << " not eliminated"
    << setExtraArgs();

  for (auto *U : Load->getPointerOperand()->users()) {
    if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
        cast<Instruction>(U)->getFunction() == Load->getFunction() &&
        DT->dominates(cast<Instruction>(U), Load)) {
      // Use the most immediately dominating value.
      if (OtherAccess) {
        if (DT->dominates(cast<Instruction>(OtherAccess), cast<Instruction>(U)))
          OtherAccess = U;
        else
          assert(DT->dominates(cast<Instruction>(U),
                               cast<Instruction>(OtherAccess)));
      } else
        OtherAccess = U;
    }
  }

  if (!OtherAccess) {
    // There is no dominating use; check if we can find the closest
    // non-dominating use that lies between any other potentially available
    // use and Load.
    for (auto *U : Load->getPointerOperand()->users()) {
      if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
          cast<Instruction>(U)->getFunction() == Load->getFunction() &&
          isPotentiallyReachable(cast<Instruction>(U), Load, nullptr, DT)) {
        if (OtherAccess) {
          if (liesBetween(cast<Instruction>(OtherAccess), cast<Instruction>(U),
                          Load, DT)) {
            OtherAccess = U;
          } else if (!liesBetween(cast<Instruction>(U),
                                  cast<Instruction>(OtherAccess), Load, DT)) {
            // These uses are both partially available at Load were it not for
            // the clobber, but neither lies strictly after the other.
            OtherAccess = nullptr;
            break;
          } // else: keep current OtherAccess since it lies between U and Load.
        } else {
          OtherAccess = U;
        }
      }
    }
  }

  if (OtherAccess)
    R << " in favor of " << NV("OtherAccess", OtherAccess);

  R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());

  ORE->emit(R);
}

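/// Given a local dependence (def or clobber) of Load, try to describe in Res
/// an available value that the load could be replaced with. Returns true if
/// such a value was found.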
bool GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
                                      Value *Address, AvailableValue &Res) {
  assert((DepInfo.isDef() || DepInfo.isClobber()) &&
         "expected a local dependence");
  assert(Load->isUnordered() && "rules below are incorrect for ordered access");

  const DataLayout &DL = Load->getModule()->getDataLayout();

  Instruction *DepInst = DepInfo.getInst();
  if (DepInfo.isClobber()) {
    // If the dependence is to a store that writes to a superset of the bits
    // read by the load, we can extract the bits we need for the load from the
    // stored value.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
      // Can't forward from non-atomic to atomic without violating memory model.
      if (Address && Load->isAtomic() <= DepSI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingStore(Load->getType(), Address, DepSI, DL);
        if (Offset != -1) {
          Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
          return true;
        }
      }
    }

    // Check to see if we have something like this:
    //    load i32* P
    //    load i8* (P+1)
    // if we have this, replace the later with an extraction from the former.
    if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      // Can't forward from non-atomic to atomic without violating memory model.
      if (DepLoad != Load && Address &&
          Load->isAtomic() <= DepLoad->isAtomic()) {
        Type *LoadType = Load->getType();
        int Offset = -1;

        // If MD reported a clobber, check whether it was a nested clobber.
        if (DepInfo.isClobber() &&
            canCoerceMustAliasedValueToLoad(DepLoad, LoadType, DL)) {
          const auto ClobberOff = MD->getClobberOffset(DepLoad);
          // GVN cannot handle a negative offset.
          Offset = (ClobberOff == None || ClobberOff.getValue() < 0)
                       ? -1
                       : ClobberOff.getValue();
        }
        if (Offset == -1)
          Offset =
              analyzeLoadFromClobberingLoad(LoadType, Address, DepLoad, DL);
        if (Offset != -1) {
          Res = AvailableValue::getLoad(DepLoad, Offset);
          return true;
        }
      }
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
      if (Address && !Load->isAtomic()) {
        int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
                                                      DepMI, DL);
        if (Offset != -1) {
          Res = AvailableValue::getMI(DepMI, Offset);
          return true;
        }
      }
    }
    // Nothing is known about this clobber; we have to be conservative.
    LLVM_DEBUG(
        // fast print dep, using operator<< on instruction is too slow.
        dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
        dbgs() << " is clobbered by " << *DepInst << '\n';);
    if (ORE->allowExtraAnalysis(DEBUG_TYPE))
      reportMayClobberedLoad(Load, DepInfo, DT, ORE);

    return false;
  }
  assert(DepInfo.isDef() && "follows from above");

  // Loading the allocation -> undef.
  if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
      isAlignedAllocLikeFn(DepInst, TLI) ||
      // Loading immediately after lifetime begin -> undef.
      isLifetimeStart(DepInst)) {
    Res = AvailableValue::get(UndefValue::get(Load->getType()));
    return true;
  }

  // Loading from calloc (which zero initializes memory) -> zero.
  if (isCallocLikeFn(DepInst, TLI)) {
    Res = AvailableValue::get(Constant::getNullValue(Load->getType()));
    return true;
  }

  if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
    // Reject loads and stores that are to the same address but are of
    // different types if we have to. If the stored value is convertible to
    // the loaded value, we can reuse it.
    if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), Load->getType(),
                                         DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (S->isAtomic() < Load->isAtomic())
      return false;

    Res = AvailableValue::get(S->getValueOperand());
    return true;
  }

  if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
    // If the types mismatch and we can't handle it, reject reuse of the load.
    // If the stored value is larger than or equal to the loaded value, we can
    // reuse it.
    if (!canCoerceMustAliasedValueToLoad(LD, Load->getType(), DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (LD->isAtomic() < Load->isAtomic())
      return false;

    Res = AvailableValue::getLoad(LD);
    return true;
  }

  // Unknown def - must be conservative.
  LLVM_DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
      dbgs() << " has unknown def " << *DepInst << '\n';);
  return false;
}

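/// Walk the non-local dependencies of Load, sorting each one into either
/// ValuesPerBlock (a usable value is available at the end of that block) or
/// UnavailableBlocks (nothing usable is known there).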
void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
                                      AvailValInBlkVect &ValuesPerBlock,
                                      UnavailBlkVect &UnavailableBlocks) {
  // Filter out useless results (non-locals, etc.).  Keep track of the blocks
  // where we have a value available in repl; also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  unsigned NumDeps = Deps.size();
  for (unsigned i = 0, e = NumDeps; i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DeadBlocks.count(DepBB)) {
      // A dead dependent mem-op is treated as a load evaluating the same
      // value as the load in question.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isDef() && !DepInfo.isClobber()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // The address being loaded in this non-local block may not be the same as
    // the pointer operand of the load if PHI translation occurs.  Make sure
    // to consider the right address.
    Value *Address = Deps[i].getAddress();

    AvailableValue AV;
    if (AnalyzeLoadAvailability(Load, DepInfo, Address, AV)) {
      // Subtlety: because we know this was a non-local dependency, we know
      // it's safe to materialize anywhere between the instruction within
      // DepInfo and the end of its block.
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          std::move(AV)));
    } else {
      UnavailableBlocks.push_back(DepBB);
    }
  }

  assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
}

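/// For each (block, pointer) pair in AvailableLoads, insert a new load at the
/// end of that block, then build SSA form over all available values and
/// replace every use of the original Load with the result.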
void GVNPass::eliminatePartiallyRedundantLoad(
    LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
    MapVector<BasicBlock *, Value *> &AvailableLoads) {
  for (const auto &AvailableLoad : AvailableLoads) {
    BasicBlock *UnavailableBlock = AvailableLoad.first;
    Value *LoadPtr = AvailableLoad.second;

    auto *NewLoad =
        new LoadInst(Load->getType(), LoadPtr, Load->getName() + ".pre",
                     Load->isVolatile(), Load->getAlign(), Load->getOrdering(),
                     Load->getSyncScopeID(), UnavailableBlock->getTerminator());
    NewLoad->setDebugLoc(Load->getDebugLoc());
    if (MSSAU) {
      auto *MSSA = MSSAU->getMemorySSA();
      // Get the defining access of the original load or use the load if it is a
      // MemoryDef (e.g. because it is volatile). The inserted loads are
      // guaranteed to load from the same definition.
      auto *LoadAcc = MSSA->getMemoryAccess(Load);
      auto *DefiningAcc =
          isa<MemoryDef>(LoadAcc) ? LoadAcc : LoadAcc->getDefiningAccess();
      auto *NewAccess = MSSAU->createMemoryAccessInBB(
          NewLoad, DefiningAcc, NewLoad->getParent(),
          MemorySSA::BeforeTerminator);
      if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
        MSSAU->insertDef(NewDef, /*RenameUses=*/true);
      else
        MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
    }

    // Transfer the old load's AA tags to the new load.
    AAMDNodes Tags = Load->getAAMetadata();
    if (Tags)
      NewLoad->setAAMetadata(Tags);

    if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
    if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
      if (LI &&
          LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
        NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);

    // We do not propagate the old load's debug location, because the new
    // load now lives in a different BB, and we want to avoid a jumpy line
    // table.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?

    // Add the newly created load.
    ValuesPerBlock.push_back(
        AvailableValueInBlock::get(UnavailableBlock, NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
  Load->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(Load);
  if (Instruction *I = dyn_cast<Instruction>(V))
    I->setDebugLoc(Load->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(Load);
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
           << "load eliminated by PRE";
  });
}

bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
                             UnavailBlkVect &UnavailableBlocks) {
  // Okay, we have *some* definitions of the value.  This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
  // doing PRE of this load.  This will involve inserting a new load into the
  // predecessor when it's not available.  We could do this in general, but
  // prefer to not increase code size.  As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
                                        UnavailableBlocks.end());

  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = Load->getParent();
  BasicBlock *TmpBB = LoadBB;

  // Check that there are no implicit control flow instructions above our load
  // in its block. If there is an instruction that doesn't always pass the
  // execution to the following instruction, then moving through it may become
  // invalid. For example:
  //
  // int arr[LEN];
  // int index = ???;
  // ...
  // guard(0 <= index && index < LEN);
  // use(arr[index]);
  //
  // It is illegal to move the array access to any point above the guard,
  // because if the index is out of bounds we should deoptimize rather than
  // access the array.
  // Check that there is no guard in this block above our instruction.
  bool MustEnsureSafetyOfSpeculativeExecution =
      ICF->isDominatedByICFIFromSameBlock(Load);

  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated.  Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;

    // Check that there is no implicit control flow in a block above.
    MustEnsureSafetyOfSpeculativeExecution =
        MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // Check to see how many predecessors have the loaded value fully
  // available.
  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
  for (const AvailableValueInBlock &AV : ValuesPerBlock)
    FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
  for (BasicBlock *UnavailableBB : UnavailableBlocks)
    FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;

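  // Predecessors reached over a critical edge; such an edge must be split
  // before a load can be inserted at the end of the predecessor.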
1348   SmallVector<BasicBlock *, 4> CriticalEdgePred;
1349   for (BasicBlock *Pred : predecessors(LoadBB)) {
1350     // If any predecessor block is an EH pad that does not allow non-PHI
1351     // instructions before the terminator, we can't PRE the load.
1352     if (Pred->getTerminator()->isEHPad()) {
1353       LLVM_DEBUG(
1354           dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1355                  << Pred->getName() << "': " << *Load << '\n');
1356       return false;
1357     }
1358 
1359     if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1360       continue;
1361     }
1362 
1363     if (Pred->getTerminator()->getNumSuccessors() != 1) {
1364       if (isa<IndirectBrInst>(Pred->getTerminator())) {
1365         LLVM_DEBUG(
1366             dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1367                    << Pred->getName() << "': " << *Load << '\n');
1368         return false;
1369       }
1370 
1371       // FIXME: Can we support the fallthrough edge?
1372       if (isa<CallBrInst>(Pred->getTerminator())) {
1373         LLVM_DEBUG(
1374             dbgs() << "COULD NOT PRE LOAD BECAUSE OF CALLBR CRITICAL EDGE '"
1375                    << Pred->getName() << "': " << *Load << '\n');
1376         return false;
1377       }
1378 
1379       if (LoadBB->isEHPad()) {
1380         LLVM_DEBUG(
1381             dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1382                    << Pred->getName() << "': " << *Load << '\n');
1383         return false;
1384       }
1385 
1386       // Do not split a backedge, as that would break the canonical loop form.
1387       if (!isLoadPRESplitBackedgeEnabled())
1388         if (DT->dominates(LoadBB, Pred)) {
1389           LLVM_DEBUG(
1390               dbgs()
1391               << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1392               << Pred->getName() << "': " << *Load << '\n');
1393           return false;
1394         }
1395 
1396       CriticalEdgePred.push_back(Pred);
1397     } else {
1398       // Only add the predecessors that will not be split for now.
1399       PredLoads[Pred] = nullptr;
1400     }
1401   }
1402 
1403   // Decide whether PRE is profitable for this load.
1404   unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
1405   assert(NumUnavailablePreds != 0 &&
1406          "Fully available value should already be eliminated!");
1407 
1408   // If this load is unavailable in multiple predecessors, reject it.
1409   // FIXME: If we could restructure the CFG, we could make a common pred with
1410   // all the preds that don't have an available Load and insert a new load into
1411   // that one block.
1412   if (NumUnavailablePreds != 1)
1413     return false;
1414 
1415   // Now we know where we will insert the load. We must ensure that it is safe
1416   // to speculatively execute the load at that point.
1417   if (MustEnsureSafetyOfSpeculativeExecution) {
1418     if (CriticalEdgePred.size())
1419       if (!isSafeToSpeculativelyExecute(Load, LoadBB->getFirstNonPHI(), DT))
1420         return false;
1421     for (auto &PL : PredLoads)
1422       if (!isSafeToSpeculativelyExecute(Load, PL.first->getTerminator(), DT))
1423         return false;
1424   }
1425 
1426   // Split critical edges, and update the unavailable predecessors accordingly.
1427   for (BasicBlock *OrigPred : CriticalEdgePred) {
1428     BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1429     assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
1430     PredLoads[NewPred] = nullptr;
1431     LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
1432                       << LoadBB->getName() << '\n');
1433   }
1434 
1435   // Check if the load can safely be moved to all the unavailable predecessors.
1436   bool CanDoPRE = true;
1437   const DataLayout &DL = Load->getModule()->getDataLayout();
1438   SmallVector<Instruction*, 8> NewInsts;
1439   for (auto &PredLoad : PredLoads) {
1440     BasicBlock *UnavailablePred = PredLoad.first;
1441 
1442     // Do PHI translation to get its value in the predecessor if necessary.  The
1443     // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
1444     // We do the translation for each edge we skipped by going from Load's block
1445     // to LoadBB, otherwise we might miss pieces needing translation.
1446 
1447     // If all preds have a single successor, then we know it is safe to insert
1448     // the load on the pred, so we can insert code to materialize the
1449     // pointer if it is not available.
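    //
    // For example (a sketch): if the address in LoadBB is
    //   %addr = getelementptr i32, i32* %base, i64 %idx
    // where %idx = phi i64 [ %idx1, %Pred1 ], [ %idx2, %Pred2 ], translating
    // %addr into %Pred1 yields (inserting it if necessary)
    //   %addr1 = getelementptr i32, i32* %base, i64 %idx1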
1450     Value *LoadPtr = Load->getPointerOperand();
1451     BasicBlock *Cur = Load->getParent();
1452     while (Cur != LoadBB) {
1453       PHITransAddr Address(LoadPtr, DL, AC);
1454       LoadPtr = Address.PHITranslateWithInsertion(
1455           Cur, Cur->getSinglePredecessor(), *DT, NewInsts);
1456       if (!LoadPtr) {
1457         CanDoPRE = false;
1458         break;
1459       }
1460       Cur = Cur->getSinglePredecessor();
1461     }
1462 
1463     if (LoadPtr) {
1464       PHITransAddr Address(LoadPtr, DL, AC);
1465       LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, *DT,
1466                                                   NewInsts);
1467     }
1468     // If we couldn't find or insert a computation of this phi translated value,
1469     // we fail PRE.
1470     if (!LoadPtr) {
1471       LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1472                         << *Load->getPointerOperand() << "\n");
1473       CanDoPRE = false;
1474       break;
1475     }
1476 
1477     PredLoad.second = LoadPtr;
1478   }
1479 
1480   if (!CanDoPRE) {
1481     while (!NewInsts.empty()) {
1482       // Erase instructions generated by the failed PHI translation before
1483       // trying to number them. PHI translation might insert instructions
1484       // in basic blocks other than the current one, and we delete them
1485       // directly, as markInstructionForDeletion only allows removing from the
1486       // current basic block.
1487       NewInsts.pop_back_val()->eraseFromParent();
1488     }
1489     // HINT: Don't revert the edge-splitting, as a following transformation may
1490     // also need to split these critical edges.
1491     return !CriticalEdgePred.empty();
1492   }
1493 
1494   // Okay, we can eliminate this load by inserting a reload in the predecessor
1495   // and using PHI construction to get the value in the other predecessors.
1496   // Do it now.
1497   LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
1498   LLVM_DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size()
1499                                            << " INSTS: " << *NewInsts.back()
1500                                            << '\n');
1501 
1502   // Assign value numbers to the new instructions.
1503   for (Instruction *I : NewInsts) {
1504     // Instructions that have been inserted in predecessor(s) to materialize
1505     // the load address do not retain their original debug locations. Doing
1506     // so could lead to confusing (but correct) source attributions.
1507     I->updateLocationAfterHoist();
1508 
1509     // FIXME: We really _ought_ to insert these value numbers into their
1510     // parent's availability map.  However, in doing so, we risk getting into
1511     // ordering issues.  If a block hasn't been processed yet, we would be
1512     // marking a value as AVAIL-IN, which isn't what we intend.
1513     VN.lookupOrAdd(I);
1514   }
1515 
1516   eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads);
1517   ++NumPRELoad;
1518   return true;
1519 }
1520 
1521 bool GVNPass::performLoopLoadPRE(LoadInst *Load,
1522                                  AvailValInBlkVect &ValuesPerBlock,
1523                                  UnavailBlkVect &UnavailableBlocks) {
1524   if (!LI)
1525     return false;
1526 
1527   const Loop *L = LI->getLoopFor(Load->getParent());
1528   // TODO: Generalize to other loop blocks that dominate the latch.
1529   if (!L || L->getHeader() != Load->getParent())
1530     return false;
1531 
1532   BasicBlock *Preheader = L->getLoopPreheader();
1533   BasicBlock *Latch = L->getLoopLatch();
1534   if (!Preheader || !Latch)
1535     return false;
1536 
1537   Value *LoadPtr = Load->getPointerOperand();
1538   // Must be available in preheader.
1539   if (!L->isLoopInvariant(LoadPtr))
1540     return false;
1541 
1542   // We plan to hoist the load to preheader without introducing a new fault.
1543   // In order to do it, we need to prove that we cannot side-exit the loop
1544   // once loop header is first entered before execution of the load.
1545   if (ICF->isDominatedByICFIFromSameBlock(Load))
1546     return false;
1547 
1548   BasicBlock *LoopBlock = nullptr;
1549   for (auto *Blocker : UnavailableBlocks) {
1550     // Blockers from outside the loop are handled in preheader.
1551     if (!L->contains(Blocker))
1552       continue;
1553 
1554     // Only allow one loop block. The loop header executes at least as often as
1555     // any other loop block, and likely much more often. But in the case of
1556     // multiple loop blocks, we need extra information (such as block frequency
1557     // info) to understand whether it is profitable to PRE into multiple loop
1558     // blocks.
1559     if (LoopBlock)
1560       return false;
1561 
1562     // Do not sink into inner loops. This may be non-profitable.
1563     if (L != LI->getLoopFor(Blocker))
1564       return false;
1565 
1566     // Blocks that dominate the latch execute on every single iteration, maybe
1567     // except the last one. So PREing into these blocks doesn't make much sense
1568     // in most cases. But the blocks that do not necessarily execute on each
1569     // iteration are sometimes much colder than the header, and this is when
1570     // PRE is potentially profitable.
1571     if (DT->dominates(Blocker, Latch))
1572       return false;
1573 
1574     // Make sure that the terminator itself doesn't clobber.
1575     if (Blocker->getTerminator()->mayWriteToMemory())
1576       return false;
1577 
1578     LoopBlock = Blocker;
1579   }
1580 
1581   if (!LoopBlock)
1582     return false;
1583 
1584   // Make sure the memory at this pointer cannot be freed, so that we can
1585   // safely reload from it after the clobber.
1586   if (LoadPtr->canBeFreed())
1587     return false;
1588 
1589   // TODO: Support critical edge splitting if blocker has more than 1 successor.
1590   MapVector<BasicBlock *, Value *> AvailableLoads;
1591   AvailableLoads[LoopBlock] = LoadPtr;
1592   AvailableLoads[Preheader] = LoadPtr;
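
  // E.g. (a sketch): a load from a loop-invariant %p in the header that is
  // clobbered only in a rarely-taken loop block is reloaded in that block and
  // in the preheader; the header then receives the value through a phi
  // instead of re-executing the load.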
1593 
1594   LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
1595   eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads);
1596   ++NumPRELoopLoad;
1597   return true;
1598 }
1599 
1600 static void reportLoadElim(LoadInst *Load, Value *AvailableValue,
1601                            OptimizationRemarkEmitter *ORE) {
1602   using namespace ore;
1603 
1604   ORE->emit([&]() {
1605     return OptimizationRemark(DEBUG_TYPE, "LoadElim", Load)
1606            << "load of type " << NV("Type", Load->getType()) << " eliminated"
1607            << setExtraArgs() << " in favor of "
1608            << NV("InfavorOfValue", AvailableValue);
1609   });
1610 }
1611 
1612 /// Attempt to eliminate a load whose dependencies are
1613 /// non-local by performing PHI construction.
1614 bool GVNPass::processNonLocalLoad(LoadInst *Load) {
1615   // Non-local speculations are not allowed under ASan.
1616   if (Load->getParent()->getParent()->hasFnAttribute(
1617           Attribute::SanitizeAddress) ||
1618       Load->getParent()->getParent()->hasFnAttribute(
1619           Attribute::SanitizeHWAddress))
1620     return false;
1621 
1622   // Step 1: Find the non-local dependencies of the load.
1623   LoadDepVect Deps;
1624   MD->getNonLocalPointerDependency(Load, Deps);
1625 
1626   // If we had to process more than one hundred blocks to find the
1627   // dependencies, this load isn't worth worrying about.  Optimizing
1628   // it will be too expensive.
1629   unsigned NumDeps = Deps.size();
1630   if (NumDeps > MaxNumDeps)
1631     return false;
1632 
1633   // If we had a phi translation failure, we'll have a single entry which is a
1634   // clobber in the current block.  Reject this early.
1635   if (NumDeps == 1 &&
1636       !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1637     LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs());
1638                dbgs() << " has unknown dependencies\n";);
1639     return false;
1640   }
1641 
1642   bool Changed = false;
1643   // If this load follows a GEP, see if we can PRE the indices before analyzing.
1644   if (GetElementPtrInst *GEP =
1645           dyn_cast<GetElementPtrInst>(Load->getOperand(0))) {
1646     for (Use &U : GEP->indices())
1647       if (Instruction *I = dyn_cast<Instruction>(U.get()))
1648         Changed |= performScalarPRE(I);
1649   }
1650 
1651   // Step 2: Analyze the availability of the load
1652   AvailValInBlkVect ValuesPerBlock;
1653   UnavailBlkVect UnavailableBlocks;
1654   AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
1655 
1656   // If we have no predecessors that produce a known value for this load, exit
1657   // early.
1658   if (ValuesPerBlock.empty())
1659     return Changed;
1660 
1661   // Step 3: Eliminate full redundancy.
1662   //
1663   // If all of the instructions we depend on produce a known value for this
1664   // load, then it is fully redundant and we can use PHI insertion to compute
1665   // its value.  Insert PHIs and remove the fully redundant value now.
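  //
  // For example (a sketch):
  //
  //   BB1: store i32 %x, i32* %p      BB2: %y = load i32, i32* %p
  //            \                         /
  //           BB3: %v = load i32, i32* %p
  //
  // %v is replaced by "phi i32 [ %x, %BB1 ], [ %y, %BB2 ]" in BB3.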
1666   if (UnavailableBlocks.empty()) {
1667     LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');
1668 
1669     // Perform PHI construction.
1670     Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1671     Load->replaceAllUsesWith(V);
1672 
1673     if (isa<PHINode>(V))
1674       V->takeName(Load);
1675     if (Instruction *I = dyn_cast<Instruction>(V))
1676       // If instruction I has debug info, then we should not update it.
1677       // Also, if I has a null DebugLoc, then it is still potentially incorrect
1678       // to propagate Load's DebugLoc because Load may not post-dominate I.
1679       if (Load->getDebugLoc() && Load->getParent() == I->getParent())
1680         I->setDebugLoc(Load->getDebugLoc());
1681     if (V->getType()->isPtrOrPtrVectorTy())
1682       MD->invalidateCachedPointerInfo(V);
1683     markInstructionForDeletion(Load);
1684     ++NumGVNLoad;
1685     reportLoadElim(Load, V, ORE);
1686     return true;
1687   }
1688 
1689   // Step 4: Eliminate partial redundancy.
1690   if (!isPREEnabled() || !isLoadPREEnabled())
1691     return Changed;
1692   if (!isLoadInLoopPREEnabled() && LI && LI->getLoopFor(Load->getParent()))
1693     return Changed;
1694 
1695   if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
1696       PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
1697     return true;
1698 
1699   return Changed;
1700 }
1701 
1702 static bool impliesEquivalanceIfTrue(CmpInst* Cmp) {
1703   if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_EQ)
1704     return true;
1705 
1706   // Floating point comparisons can be equal, but not equivalent.  Cases:
1707   // NaNs for unordered operators
1708   // +0.0 vs -0.0 for all operators
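  // E.g. "fcmp oeq double 0.0, -0.0" is true, yet 1.0/0.0 is +inf while
  // 1.0/-0.0 is -inf, so equal comparison results do not make the operands
  // interchangeable.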
1709   if (Cmp->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
1710       (Cmp->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
1711        Cmp->getFastMathFlags().noNaNs())) {
1712       Value *LHS = Cmp->getOperand(0);
1713       Value *RHS = Cmp->getOperand(1);
1714       // If we can prove either side non-zero, then equality must imply
1715       // equivalence.
1716       // FIXME: We should do this optimization if 'no signed zeros' is
1717       // applicable via an instruction-level fast-math-flag or some other
1718       // indicator that relaxed FP semantics are being used.
1719       if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1720         return true;
1721       if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1722         return true;
1723       // TODO: Handle vector floating point constants
1724   }
1725   return false;
1726 }
1727 
1728 static bool impliesEquivalanceIfFalse(CmpInst* Cmp) {
1729   if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_NE)
1730     return true;
1731 
1732   // Floating point comparisons can be equal, but not equivalent.  Cases:
1733   // NaNs for unordered operators
1734   // +0.0 vs -0.0 for all operators
1735   if ((Cmp->getPredicate() == CmpInst::Predicate::FCMP_ONE &&
1736        Cmp->getFastMathFlags().noNaNs()) ||
1737       Cmp->getPredicate() == CmpInst::Predicate::FCMP_UNE) {
1738       Value *LHS = Cmp->getOperand(0);
1739       Value *RHS = Cmp->getOperand(1);
1740       // If we can prove either side non-zero, then equality must imply
1741       // equivalence.
1742       // FIXME: We should do this optimization if 'no signed zeros' is
1743       // applicable via an instruction-level fast-math-flag or some other
1744       // indicator that relaxed FP semantics are being used.
1745       if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1746         return true;
1747       if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1748         return true;
1749       // TODO: Handle vector floating point constants
1750   }
1751   return false;
1752 }
1753 
1755 static bool hasUsersIn(Value *V, BasicBlock *BB) {
1756   for (User *U : V->users())
1757     if (isa<Instruction>(U) &&
1758         cast<Instruction>(U)->getParent() == BB)
1759       return true;
1760   return false;
1761 }
1762 
1763 bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
1764   Value *V = IntrinsicI->getArgOperand(0);
1765 
1766   if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
1767     if (Cond->isZero()) {
1768       Type *Int8Ty = Type::getInt8Ty(V->getContext());
1769       // Insert a new store-to-null instruction before the assume intrinsic to
1770       // indicate that this code is not reachable.  FIXME: We could insert an
1771       // unreachable instruction directly because we can modify the CFG.
1772       auto *NewS = new StoreInst(UndefValue::get(Int8Ty),
1773                                  Constant::getNullValue(Int8Ty->getPointerTo()),
1774                                  IntrinsicI);
1775       if (MSSAU) {
1776         const MemoryUseOrDef *FirstNonDom = nullptr;
1777         const auto *AL =
1778             MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());
1779 
1780         // If there are accesses in the current basic block, find the first one
1781         // that does not come before NewS. The new memory access is inserted
1782         // after the found access or before the terminator if no such access is
1783         // found.
1784         if (AL) {
1785           for (auto &Acc : *AL) {
1786             if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
1787               if (!Current->getMemoryInst()->comesBefore(NewS)) {
1788                 FirstNonDom = Current;
1789                 break;
1790               }
1791           }
1792         }
1793 
1794         // This added store is to null, so it will never be executed, and we
1795         // can just use the LiveOnEntry def as the defining access.
1796         auto *NewDef =
1797             FirstNonDom ? MSSAU->createMemoryAccessBefore(
1798                               NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1799                               const_cast<MemoryUseOrDef *>(FirstNonDom))
1800                         : MSSAU->createMemoryAccessInBB(
1801                               NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1802                               NewS->getParent(), MemorySSA::BeforeTerminator);
1803 
1804         MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
1805       }
1806     }
1807     if (isAssumeWithEmptyBundle(*IntrinsicI))
1808       markInstructionForDeletion(IntrinsicI);
1809     return false;
1810   } else if (isa<Constant>(V)) {
1811     // If it's not false and is a constant, it must evaluate to true. This
1812     // means our assume is assume(true), and thus pointless, so we don't want
1813     // to do anything more here.
1814     return false;
1815   }
1816 
1817   Constant *True = ConstantInt::getTrue(V->getContext());
1818   bool Changed = false;
1819 
1820   for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
1821     BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
1822 
1823     // This property is only true in dominated successors; propagateEquality
1824     // will check dominance for us.
1825     Changed |= propagateEquality(V, True, Edge, false);
1826   }
1827 
1828   // We can replace assume value with true, which covers cases like this:
1829   // call void @llvm.assume(i1 %cmp)
1830   // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
1831   ReplaceOperandsWithMap[V] = True;
1832 
1833   // Similarly, after assume(!NotV) we know that NotV == false.
1834   Value *NotV;
1835   if (match(V, m_Not(m_Value(NotV))))
1836     ReplaceOperandsWithMap[NotV] = ConstantInt::getFalse(V->getContext());
1837 
1838   // If we find an equality fact, canonicalize all dominated uses in this block
1839   // to one of the two values.  We heuristically choose the "oldest" of the
1840   // two, where age is determined by value number. (Note that propagateEquality
1841   // above handles the cross-block case.)
1842   //
1843   // Key cases to cover are:
1844   // 1)
1845   // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
1846   // call void @llvm.assume(i1 %cmp)
1847   // ret float %0 ; will change it to ret float 3.000000e+00
1848   // 2)
1849   // %load = load float, float* %addr
1850   // %cmp = fcmp oeq float %load, %0
1851   // call void @llvm.assume(i1 %cmp)
1852   // ret float %load ; will change it to ret float %0
1853   if (auto *CmpI = dyn_cast<CmpInst>(V)) {
1854     if (impliesEquivalanceIfTrue(CmpI)) {
1855       Value *CmpLHS = CmpI->getOperand(0);
1856       Value *CmpRHS = CmpI->getOperand(1);
1857       // Heuristically pick the better replacement -- the choice of heuristic
1858       // isn't terribly important here, but the fact we canonicalize on some
1859       // replacement is for exposing other simplifications.
1860       // TODO: pull this out as a helper function and reuse w/existing
1861       // (slightly different) logic.
1862       if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
1863         std::swap(CmpLHS, CmpRHS);
1864       if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
1865         std::swap(CmpLHS, CmpRHS);
1866       if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
1867           (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
1868         // Move the 'oldest' value to the right-hand side, using the value
1869         // number as a proxy for age.
1870         uint32_t LVN = VN.lookupOrAdd(CmpLHS);
1871         uint32_t RVN = VN.lookupOrAdd(CmpRHS);
1872         if (LVN < RVN)
1873           std::swap(CmpLHS, CmpRHS);
1874       }
1875 
1876       // Handle the degenerate case where we haven't yet pruned a dead path
1877       // or removed a trivial assume.
1878       if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
1879         return Changed;
1880 
1881       LLVM_DEBUG(dbgs() << "Replacing dominated uses of "
1882                  << *CmpLHS << " with "
1883                  << *CmpRHS << " in block "
1884                  << IntrinsicI->getParent()->getName() << "\n");
1885 
1886       // Set up the replacement map - this handles uses within the same block.
1888       if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
1889         ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
1890 
1891       // NOTE: The non-block local cases are handled by the call to
1892       // propagateEquality above; this block is just about handling the block
1893       // local cases.  TODO: There's a bunch of logic in propagateEquality which
1894       // isn't duplicated for the block local case, can we share it somehow?
1895     }
1896   }
1897   return Changed;
1898 }
1899 
1900 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
1901   patchReplacementInstruction(I, Repl);
1902   I->replaceAllUsesWith(Repl);
1903 }
1904 
1905 /// Attempt to eliminate a load, first by eliminating it
1906 /// locally, and then attempting non-local elimination if that fails.
1907 bool GVNPass::processLoad(LoadInst *L) {
1908   if (!MD)
1909     return false;
1910 
1911   // This code hasn't been audited for ordered or volatile memory access
1912   if (!L->isUnordered())
1913     return false;
1914 
1915   if (L->use_empty()) {
1916     markInstructionForDeletion(L);
1917     return true;
1918   }
1919 
1920   // ... to a pointer that has been loaded from before...
1921   MemDepResult Dep = MD->getDependency(L);
1922 
1923   // If it is defined in another block, try harder.
1924   if (Dep.isNonLocal())
1925     return processNonLocalLoad(L);
1926 
1927   // Only handle the local case below
1928   if (!Dep.isDef() && !Dep.isClobber()) {
1929     // This might be a NonFuncLocal or an Unknown
1930     LLVM_DEBUG(
1931         // Fast-print the dep; using operator<< on an instruction is too slow.
1932         dbgs() << "GVN: load "; L->printAsOperand(dbgs());
1933         dbgs() << " has unknown dependence\n";);
1934     return false;
1935   }
1936 
1937   AvailableValue AV;
1938   if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
1939     Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);
1940 
1941     // Replace the load!
1942     patchAndReplaceAllUsesWith(L, AvailableValue);
1943     markInstructionForDeletion(L);
1944     if (MSSAU)
1945       MSSAU->removeMemoryAccess(L);
1946     ++NumGVNLoad;
1947     reportLoadElim(L, AvailableValue, ORE);
1948     // Tell MDA to reexamine the reused pointer since we might have more
1949     // information after forwarding it.
1950     if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
1951       MD->invalidateCachedPointerInfo(AvailableValue);
1952     return true;
1953   }
1954 
1955   return false;
1956 }
1957 
1958 /// Return a pair whose first field is the value number of \p Exp and whose
1959 /// second field indicates whether that value number is newly created.
1960 std::pair<uint32_t, bool>
1961 GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) {
1962   uint32_t &e = expressionNumbering[Exp];
1963   bool CreateNewValNum = !e;
1964   if (CreateNewValNum) {
1965     Expressions.push_back(Exp);
1966     if (ExprIdx.size() < nextValueNumber + 1)
1967       ExprIdx.resize(nextValueNumber * 2);
1968     e = nextValueNumber;
1969     ExprIdx[nextValueNumber++] = nextExprNumber++;
1970   }
1971   return {e, CreateNewValNum};
1972 }
1973 
1974 /// Return whether all the values related to the same \p num are
1975 /// defined in \p BB.
1976 bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
1977                                          GVNPass &Gvn) {
1978   LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
1979   while (Vals && Vals->BB == BB)
1980     Vals = Vals->Next;
1981   return !Vals;
1982 }
1983 
1984 /// Wrap phiTranslateImpl to provide caching functionality.
1985 uint32_t GVNPass::ValueTable::phiTranslate(const BasicBlock *Pred,
1986                                            const BasicBlock *PhiBlock,
1987                                            uint32_t Num, GVNPass &Gvn) {
1988   auto FindRes = PhiTranslateTable.find({Num, Pred});
1989   if (FindRes != PhiTranslateTable.end())
1990     return FindRes->second;
1991   uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
1992   PhiTranslateTable.insert({{Num, Pred}, NewNum});
1993   return NewNum;
1994 }
1995 
1996 // Return true if the value numbers \p Num and \p NewNum denote the same value.
1997 // Return false if the result is unknown.
1998 bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
1999                                            const BasicBlock *Pred,
2000                                            const BasicBlock *PhiBlock,
2001                                            GVNPass &Gvn) {
2002   CallInst *Call = nullptr;
2003   LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
2004   while (Vals) {
2005     Call = dyn_cast<CallInst>(Vals->Val);
2006     if (Call && Call->getParent() == PhiBlock)
2007       break;
2008     Vals = Vals->Next;
2009   }
2010 
2011   if (AA->doesNotAccessMemory(Call))
2012     return true;
2013 
2014   if (!MD || !AA->onlyReadsMemory(Call))
2015     return false;
2016 
2017   MemDepResult local_dep = MD->getDependency(Call);
2018   if (!local_dep.isNonLocal())
2019     return false;
2020 
2021   const MemoryDependenceResults::NonLocalDepInfo &deps =
2022       MD->getNonLocalCallDependency(Call);
2023 
2024   // Check to see if the Call has no function local clobber.
2025   for (const NonLocalDepEntry &D : deps) {
2026     if (D.getResult().isNonFuncLocal())
2027       return true;
2028   }
2029   return false;
2030 }
2031 
2032 /// Translate value number \p Num using phis, so that it has the values of
2033 /// the phis in BB.
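/// For example (a sketch): if PhiBlock contains
///   %a = phi i32 [ %x, %Pred ], [ %y, %Other ]
/// and \p Num is the value number of "add i32 %a, 1", then translating it
/// through %Pred yields the value number of "add i32 %x, 1".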
2034 uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
2035                                                const BasicBlock *PhiBlock,
2036                                                uint32_t Num, GVNPass &Gvn) {
2037   if (PHINode *PN = NumberingPhi[Num]) {
2038     for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
2039       if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
2040         if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
2041           return TransVal;
2042     }
2043     return Num;
2044   }
2045 
2046   // If any value related to Num is defined in a BB other than PhiBlock,
2047   // it cannot depend on a phi in PhiBlock without going through a backedge.
2048   // We can do an early exit in that case to save compile time.
2049   if (!areAllValsInBB(Num, PhiBlock, Gvn))
2050     return Num;
2051 
2052   if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
2053     return Num;
2054   Expression Exp = Expressions[ExprIdx[Num]];
2055 
2056   for (unsigned i = 0; i < Exp.varargs.size(); i++) {
2057     // For InsertValue and ExtractValue, some varargs are index numbers
2058     // instead of value numbers. Those index numbers should not be
2059     // translated.
2060     if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
2061         (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
2062         (i > 1 && Exp.opcode == Instruction::ShuffleVector))
2063       continue;
2064     Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
2065   }
2066 
2067   if (Exp.commutative) {
2068     assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
2069     if (Exp.varargs[0] > Exp.varargs[1]) {
2070       std::swap(Exp.varargs[0], Exp.varargs[1]);
2071       uint32_t Opcode = Exp.opcode >> 8;
2072       if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
2073         Exp.opcode = (Opcode << 8) |
2074                      CmpInst::getSwappedPredicate(
2075                          static_cast<CmpInst::Predicate>(Exp.opcode & 255));
2076     }
2077   }
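
  // E.g. (a sketch): after this canonicalization, "icmp slt i32 %a, %b" and
  // "icmp sgt i32 %b, %a" map to the same expression and therefore receive
  // the same value number.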
2078 
2079   if (uint32_t NewNum = expressionNumbering[Exp]) {
2080     if (Exp.opcode == Instruction::Call && NewNum != Num)
2081       return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
2082     return NewNum;
2083   }
2084   return Num;
2085 }
2086 
2087 /// Erase stale entry from phiTranslate cache so phiTranslate can be computed
2088 /// again.
2089 void GVNPass::ValueTable::eraseTranslateCacheEntry(
2090     uint32_t Num, const BasicBlock &CurrBlock) {
2091   for (const BasicBlock *Pred : predecessors(&CurrBlock))
2092     PhiTranslateTable.erase({Num, Pred});
2093 }
2094 
2095 // In order to find a leader for a given value number at a
2096 // specific basic block, we first obtain the list of all Values for that number,
2097 // and then scan the list to find one whose block dominates the block in
2098 // question.  This is fast because dominator tree queries consist of only
2099 // a few comparisons of DFS numbers.
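// E.g. (a sketch): if value number N has leaders recorded in blocks %A and
// %B and only %A dominates the query block, the %A value is returned; a
// dominating constant leader is preferred and returned immediately.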
2100 Value *GVNPass::findLeader(const BasicBlock *BB, uint32_t num) {
2101   LeaderTableEntry Vals = LeaderTable[num];
2102   if (!Vals.Val) return nullptr;
2103 
2104   Value *Val = nullptr;
2105   if (DT->dominates(Vals.BB, BB)) {
2106     Val = Vals.Val;
2107     if (isa<Constant>(Val)) return Val;
2108   }
2109 
2110   LeaderTableEntry* Next = Vals.Next;
2111   while (Next) {
2112     if (DT->dominates(Next->BB, BB)) {
2113       if (isa<Constant>(Next->Val)) return Next->Val;
2114       if (!Val) Val = Next->Val;
2115     }
2116 
2117     Next = Next->Next;
2118   }
2119 
2120   return Val;
2121 }
2122 
2123 /// There is an edge from 'Src' to 'Dst'.  Return
2124 /// true if every path from the entry block to 'Dst' passes via this edge.  In
2125 /// particular 'Dst' must not be reachable via another edge from 'Src'.
2126 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
2127                                        DominatorTree *DT) {
2128   // While in theory it is interesting to consider the case in which Dst has
2129   // more than one predecessor, because Dst might be part of a loop which is
2130   // only reachable from Src, in practice it is pointless since at the time
2131   // GVN runs all such loops have preheaders, which means that Dst will have
2132   // been changed to have only one predecessor, namely Src.
2133   const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
2134   assert((!Pred || Pred == E.getStart()) &&
2135          "No edge between these basic blocks!");
2136   return Pred != nullptr;
2137 }
2138 
2139 void GVNPass::assignBlockRPONumber(Function &F) {
2140   BlockRPONumber.clear();
2141   uint32_t NextBlockNumber = 1;
2142   ReversePostOrderTraversal<Function *> RPOT(&F);
2143   for (BasicBlock *BB : RPOT)
2144     BlockRPONumber[BB] = NextBlockNumber++;
2145   InvalidBlockRPONumbers = false;
2146 }
2147 
2148 bool GVNPass::replaceOperandsForInBlockEquality(Instruction *Instr) const {
2149   bool Changed = false;
2150   for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
2151     Value *Operand = Instr->getOperand(OpNum);
2152     auto it = ReplaceOperandsWithMap.find(Operand);
2153     if (it != ReplaceOperandsWithMap.end()) {
2154       LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
2155                         << *it->second << " in instruction " << *Instr << '\n');
2156       Instr->setOperand(OpNum, it->second);
2157       Changed = true;
2158     }
2159   }
2160   return Changed;
2161 }
2162 
2163 /// The given values are known to be equal in every block
2164 /// dominated by 'Root'.  Exploit this, for example by replacing 'LHS' with
2165 /// 'RHS' everywhere in the scope.  Returns whether a change was made.
2166 /// If DominatesByEdge is false, then it means that we will propagate the RHS
2167 /// value starting from the end of Root.Start.
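///
/// For example (a sketch): given "br i1 %cmp, label %T, label %F" with
/// "%cmp = icmp eq i32 %a, %b", propagating along the edge to %T allows every
/// use of %a dominated by that edge to be rewritten to %b (or vice versa).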
2168 bool GVNPass::propagateEquality(Value *LHS, Value *RHS,
2169                                 const BasicBlockEdge &Root,
2170                                 bool DominatesByEdge) {
2171   SmallVector<std::pair<Value*, Value*>, 4> Worklist;
2172   Worklist.push_back(std::make_pair(LHS, RHS));
2173   bool Changed = false;
2174   // For speed, compute a conservative fast approximation to
2175   // DT->dominates(Root, Root.getEnd());
2176   const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
2177 
2178   while (!Worklist.empty()) {
2179     std::pair<Value*, Value*> Item = Worklist.pop_back_val();
2180     LHS = Item.first; RHS = Item.second;
2181 
2182     if (LHS == RHS)
2183       continue;
2184     assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
2185 
2186     // Don't try to propagate equalities between constants.
2187     if (isa<Constant>(LHS) && isa<Constant>(RHS))
2188       continue;
2189 
2190     // Prefer a constant on the right-hand side, or an Argument if no constants.
2191     if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
2192       std::swap(LHS, RHS);
2193     assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
2194 
2195     // If there is no obvious reason to prefer the left-hand side over the
2196     // right-hand side, ensure the longest lived term is on the right-hand side,
2197     // so the shortest lived term will be replaced by the longest lived.
2198     // This tends to expose more simplifications.
2199     uint32_t LVN = VN.lookupOrAdd(LHS);
2200     if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
2201         (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
2202       // Move the 'oldest' value to the right-hand side, using the value number
2203       // as a proxy for age.
2204       uint32_t RVN = VN.lookupOrAdd(RHS);
2205       if (LVN < RVN) {
2206         std::swap(LHS, RHS);
2207         LVN = RVN;
2208       }
2209     }
2210 
2211     // If value numbering later sees that an instruction in the scope is equal
2212     // to 'LHS' then ensure it will be turned into 'RHS'.  In order to preserve
2213     // the invariant that instructions only occur in the leader table for their
2214     // own value number (this is used by removeFromLeaderTable), do not do this
2215     // if RHS is an instruction (if an instruction in the scope is morphed into
2216     // LHS then it will be turned into RHS by the next GVN iteration anyway, so
2217     // using the leader table is about compiling faster, not optimizing better).
2218     // The leader table only tracks basic blocks, not edges. Only add to the
2219     // table if we have the simple case where the edge dominates the end.
2220     if (RootDominatesEnd && !isa<Instruction>(RHS))
2221       addToLeaderTable(LVN, RHS, Root.getEnd());
2222 
2223     // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope.  As
2224     // LHS always has at least one use that is not dominated by Root, this will
2225     // never do anything if LHS has only one use.
2226     if (!LHS->hasOneUse()) {
2227       unsigned NumReplacements =
2228           DominatesByEdge
2229               ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
2230               : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());
2231 
2232       Changed |= NumReplacements > 0;
2233       NumGVNEqProp += NumReplacements;
2234       // Cached information for anything that uses LHS will be invalid.
2235       if (MD)
2236         MD->invalidateCachedPointerInfo(LHS);
2237     }
2238 
2239     // Now try to deduce additional equalities from this one. For example, if
2240     // the known equality was "(A != B)" == "false" then it follows that A and B
2241     // are equal in the scope. Only boolean equalities with an explicit true or
2242     // false RHS are currently supported.
2243     if (!RHS->getType()->isIntegerTy(1))
2244       // Not a boolean equality - bail out.
2245       continue;
2246     ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
2247     if (!CI)
2248       // RHS neither 'true' nor 'false' - bail out.
2249       continue;
2250     // Whether RHS equals 'true'.  Otherwise it equals 'false'.
2251     bool isKnownTrue = CI->isMinusOne();
2252     bool isKnownFalse = !isKnownTrue;
2253 
2254     // If "A && B" is known true then both A and B are known true.  If "A || B"
2255     // is known false then both A and B are known false.
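    // E.g. (a sketch): from "%c = and i1 %a, %b" together with %c == true,
    // we queue %a == true and %b == true for further propagation.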
2256     Value *A, *B;
2257     if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
2258         (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
2259       Worklist.push_back(std::make_pair(A, RHS));
2260       Worklist.push_back(std::make_pair(B, RHS));
2261       continue;
2262     }
2263 
2264     // If we are propagating an equality like "(A == B)" == "true" then also
2265     // propagate the equality A == B.  When propagating a comparison such as
2266     // "(A >= B)" == "true", replace all instances of "A < B" with "false".
2267     if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2268       Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
2269 
2270       // If "A == B" is known true, or "A != B" is known false, then replace
2271       // A with B everywhere in the scope.  For floating point operations, we
2272       // have to be careful since equality does not always imply equivalence.
2273       if ((isKnownTrue && impliesEquivalanceIfTrue(Cmp)) ||
2274           (isKnownFalse && impliesEquivalanceIfFalse(Cmp)))
2275         Worklist.push_back(std::make_pair(Op0, Op1));
2276 
2277       // If "A >= B" is known true, replace "A < B" with false everywhere.
2278       CmpInst::Predicate NotPred = Cmp->getInversePredicate();
2279       Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
2280       // Since we don't have the instruction "A < B" immediately to hand, work
2281       // out the value number that it would have and use that to find an
2282       // appropriate instruction (if any).
2283       uint32_t NextNum = VN.getNextUnusedValueNumber();
2284       uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
2285       // If the number we were assigned was brand new then there is no point in
2286       // looking for an instruction realizing it: there cannot be one!
2287       if (Num < NextNum) {
2288         Value *NotCmp = findLeader(Root.getEnd(), Num);
2289         if (NotCmp && isa<Instruction>(NotCmp)) {
2290           unsigned NumReplacements =
2291               DominatesByEdge
2292                   ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
2293                   : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
2294                                              Root.getStart());
2295           Changed |= NumReplacements > 0;
2296           NumGVNEqProp += NumReplacements;
2297           // Cached information for anything that uses NotCmp will be invalid.
2298           if (MD)
2299             MD->invalidateCachedPointerInfo(NotCmp);
2300         }
2301       }
2302       // Ensure that any instruction in scope that gets the "A < B" value number
2303       // is replaced with false.
2304       // The leader table only tracks basic blocks, not edges. Only add to the
2305       // table if we have the simple case where the edge dominates the end.
2306       if (RootDominatesEnd)
2307         addToLeaderTable(Num, NotVal, Root.getEnd());
2308 
2309       continue;
2310     }
2311   }
2312 
2313   return Changed;
2314 }
2315 
2316 /// When calculating availability, handle an instruction
2317 /// by inserting it into the appropriate sets
2318 bool GVNPass::processInstruction(Instruction *I) {
2319   // Ignore dbg info intrinsics.
2320   if (isa<DbgInfoIntrinsic>(I))
2321     return false;
2322 
2323   // If the instruction can be easily simplified then do so now in preference
2324   // to value numbering it.  Value numbering often exposes redundancies, for
2325   // example if it determines that %y is equal to %x then the instruction
2326   // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
2327   const DataLayout &DL = I->getModule()->getDataLayout();
2328   if (Value *V = SimplifyInstruction(I, {DL, TLI, DT, AC})) {
2329     bool Changed = false;
2330     if (!I->use_empty()) {
2331       // Simplification can cause a special instruction to no longer be
2332       // special. For example, devirtualization to a willreturn function.
2333       ICF->removeUsersOf(I);
2334       I->replaceAllUsesWith(V);
2335       Changed = true;
2336     }
2337     if (isInstructionTriviallyDead(I, TLI)) {
2338       markInstructionForDeletion(I);
2339       Changed = true;
2340     }
2341     if (Changed) {
2342       if (MD && V->getType()->isPtrOrPtrVectorTy())
2343         MD->invalidateCachedPointerInfo(V);
2344       ++NumGVNSimpl;
2345       return true;
2346     }
2347   }
2348 
2349   if (auto *Assume = dyn_cast<AssumeInst>(I))
2350     return processAssumeIntrinsic(Assume);
2351 
2352   if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
2353     if (processLoad(Load))
2354       return true;
2355 
2356     unsigned Num = VN.lookupOrAdd(Load);
2357     addToLeaderTable(Num, Load, Load->getParent());
2358     return false;
2359   }
2360 
2361   // For conditional branches, we can perform simple conditional propagation on
2362   // the condition value itself.
2363   if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
2364     if (!BI->isConditional())
2365       return false;
2366 
2367     if (isa<Constant>(BI->getCondition()))
2368       return processFoldableCondBr(BI);
2369 
2370     Value *BranchCond = BI->getCondition();
2371     BasicBlock *TrueSucc = BI->getSuccessor(0);
2372     BasicBlock *FalseSucc = BI->getSuccessor(1);
2373     // Avoid multiple edges early.
2374     if (TrueSucc == FalseSucc)
2375       return false;
2376 
2377     BasicBlock *Parent = BI->getParent();
2378     bool Changed = false;
2379 
2380     Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
2381     BasicBlockEdge TrueE(Parent, TrueSucc);
2382     Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
2383 
2384     Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
2385     BasicBlockEdge FalseE(Parent, FalseSucc);
2386     Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
2387 
2388     return Changed;
2389   }
2390 
2391   // For switches, propagate the case values into the case destinations.
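  // E.g. (a sketch): for "switch i32 %x, label %def [ i32 7, label %bb7 ]",
  // uses of %x dominated by the edge to %bb7 can be replaced by 7, provided
  // %bb7 is reachable only along that edge.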
2392   if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
2393     Value *SwitchCond = SI->getCondition();
2394     BasicBlock *Parent = SI->getParent();
2395     bool Changed = false;
2396 
2397     // Remember how many outgoing edges there are to every successor.
2398     SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2399     for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
2400       ++SwitchEdges[SI->getSuccessor(i)];
2401 
2402     for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
2403          i != e; ++i) {
2404       BasicBlock *Dst = i->getCaseSuccessor();
2405       // If there is only a single edge, propagate the case value into it.
2406       if (SwitchEdges.lookup(Dst) == 1) {
2407         BasicBlockEdge E(Parent, Dst);
2408         Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
2409       }
2410     }
2411     return Changed;
2412   }
2413 
2414   // Instructions with void type don't return a value, so there's
2415   // no point in trying to find redundancies in them.
2416   if (I->getType()->isVoidTy())
2417     return false;
2418 
2419   uint32_t NextNum = VN.getNextUnusedValueNumber();
2420   unsigned Num = VN.lookupOrAdd(I);
2421 
2422   // Allocations, terminators and PHI nodes are always uniquely numbered, so
2423   // we can save time and memory by fast-failing them.
2424   if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
2425     addToLeaderTable(Num, I, I->getParent());
2426     return false;
2427   }
2428 
2429   // If the number we were assigned was a brand new VN, then we don't
2430   // need to do a lookup to see if the number already exists
2431   // somewhere in the domtree: it can't!
2432   if (Num >= NextNum) {
2433     addToLeaderTable(Num, I, I->getParent());
2434     return false;
2435   }
2436 
2437   // Perform fast-path value-number based elimination of values inherited from
2438   // dominators.
2439   Value *Repl = findLeader(I->getParent(), Num);
2440   if (!Repl) {
2441     // Failure, just remember this instance for future use.
2442     addToLeaderTable(Num, I, I->getParent());
2443     return false;
2444   } else if (Repl == I) {
2445     // If I was the result of a shortcut PRE, it might already be in the table
2446     // and the best replacement for itself. Nothing to do.
2447     return false;
2448   }
2449 
2450   // Remove it!
2451   patchAndReplaceAllUsesWith(I, Repl);
2452   if (MD && Repl->getType()->isPtrOrPtrVectorTy())
2453     MD->invalidateCachedPointerInfo(Repl);
2454   markInstructionForDeletion(I);
2455   return true;
2456 }
2457 
2458 /// runImpl - This is the main transformation entry point for a function.
2459 bool GVNPass::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
2460                       const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2461                       MemoryDependenceResults *RunMD, LoopInfo *LI,
2462                       OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) {
2463   AC = &RunAC;
2464   DT = &RunDT;
2465   VN.setDomTree(DT);
2466   TLI = &RunTLI;
2467   VN.setAliasAnalysis(&RunAA);
2468   MD = RunMD;
2469   ImplicitControlFlowTracking ImplicitCFT;
2470   ICF = &ImplicitCFT;
2471   this->LI = LI;
2472   VN.setMemDep(MD);
2473   ORE = RunORE;
2474   InvalidBlockRPONumbers = true;
2475   MemorySSAUpdater Updater(MSSA);
2476   MSSAU = MSSA ? &Updater : nullptr;
2477 
2478   bool Changed = false;
2479   bool ShouldContinue = true;
2480 
2481   DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
2482   // Merge unconditional branches, allowing PRE to catch more
2483   // optimization opportunities.
2484   for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
2485     bool removedBlock = MergeBlockIntoPredecessor(&BB, &DTU, LI, MSSAU, MD);
2486     if (removedBlock)
2487       ++NumGVNBlocks;
2488 
2489     Changed |= removedBlock;
2490   }
2491 
2492   unsigned Iteration = 0;
2493   while (ShouldContinue) {
2494     LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2495     ShouldContinue = iterateOnFunction(F);
2496     Changed |= ShouldContinue;
2497     ++Iteration;
2498   }
2499 
2500   if (isPREEnabled()) {
2501     // Fabricate val-num for dead-code in order to suppress assertion in
2502     // performPRE().
2503     assignValNumForDeadCode();
2504     bool PREChanged = true;
2505     while (PREChanged) {
2506       PREChanged = performPRE(F);
2507       Changed |= PREChanged;
2508     }
2509   }
2510 
2511   // FIXME: Should perform GVN again after PRE does something.  PRE can move
2512   // computations into blocks where they become fully redundant.  Note that
2513   // we can't do this until PRE's critical edge splitting updates memdep.
2514   // Actually, when this happens, we should just fully integrate PRE into GVN.
2515 
2516   cleanupGlobalSets();
2517   // Do not clean up DeadBlocks in cleanupGlobalSets(), as it's called for
2518   // each iteration.
2519   DeadBlocks.clear();
2520 
2521   if (MSSA && VerifyMemorySSA)
2522     MSSA->verifyMemorySSA();
2523 
2524   return Changed;
2525 }
2526 
2527 bool GVNPass::processBlock(BasicBlock *BB) {
2528   // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
2529   // (and incrementing BI before processing an instruction).
2530   assert(InstrsToErase.empty() &&
2531          "We expect InstrsToErase to be empty across iterations");
2532   if (DeadBlocks.count(BB))
2533     return false;
2534 
2535   // Clear the map before every BB, because it is only valid for a single BB.
2536   ReplaceOperandsWithMap.clear();
2537   bool ChangedFunction = false;
2538 
2539   // Since we may not have visited the input blocks of the phis, we can't
2540   // use our normal hash approach for phis.  Instead, simply look for
2541   // obvious duplicates.  The first pass of GVN will tend to create
2542   // identical phis, and the second or later passes can eliminate them.
2543   ChangedFunction |= EliminateDuplicatePHINodes(BB);
2544 
2545   for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2546        BI != BE;) {
2547     if (!ReplaceOperandsWithMap.empty())
2548       ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2549     ChangedFunction |= processInstruction(&*BI);
2550 
2551     if (InstrsToErase.empty()) {
2552       ++BI;
2553       continue;
2554     }
2555 
2556     // If we need some instructions deleted, do it now.
2557     NumGVNInstr += InstrsToErase.size();
2558 
2559     // Avoid iterator invalidation.
2560     bool AtStart = BI == BB->begin();
2561     if (!AtStart)
2562       --BI;
2563 
2564     for (auto *I : InstrsToErase) {
2565       assert(I->getParent() == BB && "Removing instruction from wrong block?");
2566       LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
2567       salvageKnowledge(I, AC);
2568       salvageDebugInfo(*I);
2569       if (MD) MD->removeInstruction(I);
2570       if (MSSAU)
2571         MSSAU->removeMemoryAccess(I);
2572       LLVM_DEBUG(verifyRemoved(I));
2573       ICF->removeInstruction(I);
2574       I->eraseFromParent();
2575     }
2576     InstrsToErase.clear();
2577 
2578     if (AtStart)
2579       BI = BB->begin();
2580     else
2581       ++BI;
2582   }
2583 
2584   return ChangedFunction;
2585 }
2586 
2587 // Instantiate an expression in a predecessor that lacked it.
2588 bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2589                                         BasicBlock *Curr, unsigned int ValNo) {
2590   // Because we are going top-down through the block, all value numbers
2591   // will be available in the predecessor by the time we need them.  Any
2592   // that weren't originally present will have been instantiated earlier
2593   // in this loop.
2594   bool success = true;
2595   for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
2596     Value *Op = Instr->getOperand(i);
2597     if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2598       continue;
2599     // This could be a newly inserted instruction, in which case, we won't
2600     // find a value number, and should give up before we hurt ourselves.
2601     // FIXME: Rewrite the infrastructure to make it easier to value number
2602     // and process newly inserted instructions.
2603     if (!VN.exists(Op)) {
2604       success = false;
2605       break;
2606     }
2607     uint32_t TValNo =
2608         VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
2609     if (Value *V = findLeader(Pred, TValNo)) {
2610       Instr->setOperand(i, V);
2611     } else {
2612       success = false;
2613       break;
2614     }
2615   }
2616 
2617   // Fail out if we encounter an operand that is not available in
2618   // the PRE predecessor.  This is typically because of loads which
2619   // are not value numbered precisely.
2620   if (!success)
2621     return false;
2622 
2623   Instr->insertBefore(Pred->getTerminator());
2624   Instr->setName(Instr->getName() + ".pre");
2625   Instr->setDebugLoc(Instr->getDebugLoc());
2626 
2627   ICF->insertInstructionTo(Instr, Pred);
2628 
2629   unsigned Num = VN.lookupOrAdd(Instr);
2630   VN.add(Instr, Num);
2631 
2632   // Update the availability map to include the new instruction.
2633   addToLeaderTable(Num, Instr, Pred);
2634   return true;
2635 }
2636 
2637 bool GVNPass::performScalarPRE(Instruction *CurInst) {
2638   if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
2639       isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
2640       CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2641       isa<DbgInfoIntrinsic>(CurInst))
2642     return false;
2643 
2644   // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
2645   // sinking the compare again, and it would force the code generator to
2646   // move the i1 from processor flags or predicate registers into a general
2647   // purpose register.
2648   if (isa<CmpInst>(CurInst))
2649     return false;
2650 
2651   // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
2652   // sinking the addressing mode computation back to its uses. Extending the
2653   // GEP's live range increases the register pressure, and therefore it can
2654   // introduce unnecessary spills.
2655   //
2656   // This doesn't prevent Load PRE. PHI translation will make the GEP available
2657   // to the load by moving it to the predecessor block if necessary.
2658   if (isa<GetElementPtrInst>(CurInst))
2659     return false;
2660 
2661   if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
2662     // We don't currently value number ANY inline asm calls.
2663     if (CallB->isInlineAsm())
2664       return false;
2665     // Don't do PRE on convergent calls.
2666     if (CallB->isConvergent())
2667       return false;
2668   }
2669 
2670   uint32_t ValNo = VN.lookup(CurInst);
2671 
2672   // Look for the predecessors for PRE opportunities.  We're
2673   // only trying to solve the basic diamond case, where
2674   // a value is computed in the successor and one predecessor,
2675   // but not the other.  We also explicitly disallow cases
2676   // where the successor is its own predecessor, because they're
2677   // more complicated to get right.
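  //
  // E.g. (a sketch):
  //
  //          BB0
  //         /    \
  //   BB1: %t = add i32 %a, %b    BB2: (no add)
  //         \    /
  //     BB3: %v = add i32 %a, %b
  //
  // Scalar PRE inserts "add i32 %a, %b" at the end of BB2 and turns %v into
  // a phi of %t and the new instruction.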
2678   unsigned NumWith = 0;
2679   unsigned NumWithout = 0;
2680   BasicBlock *PREPred = nullptr;
2681   BasicBlock *CurrentBlock = CurInst->getParent();
2682 
2683   // Update the RPO numbers for this function.
2684   if (InvalidBlockRPONumbers)
2685     assignBlockRPONumber(*CurrentBlock->getParent());
2686 
2687   SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
2688   for (BasicBlock *P : predecessors(CurrentBlock)) {
2689     // We're not interested in PRE where the predecessor block is not
2690     // reachable from the entry block.
2691     if (!DT->isReachableFromEntry(P)) {
2692       NumWithout = 2;
2693       break;
2694     }
2695     // It is not safe to do PRE when P->CurrentBlock is a loop backedge and
2696     // CurInst has an operand defined in CurrentBlock (so it may be defined
2697     // by a phi in the loop header).
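    // For example (an illustrative sketch, not from the original source):
    //   loop:                                   ; CurrentBlock, P == loop
    //     %i = phi i32 [ 0, %entry ], [ %i.inc, %loop ]
    //     %v = add i32 %i, 1
    //     br i1 %c, label %loop, label %exit
    // A clone of %v at the end of P computes %i + 1 for the *previous*
    // iteration, which is not the value %v must produce after %i advances.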
2698     assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
2699            "Invalid BlockRPONumber map.");
2700     if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] &&
2701         llvm::any_of(CurInst->operands(), [&](const Use &U) {
2702           if (auto *Inst = dyn_cast<Instruction>(U.get()))
2703             return Inst->getParent() == CurrentBlock;
2704           return false;
2705         })) {
2706       NumWithout = 2;
2707       break;
2708     }
2709 
2710     uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
2711     Value *predV = findLeader(P, TValNo);
2712     if (!predV) {
2713       predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
2714       PREPred = P;
2715       ++NumWithout;
2716     } else if (predV == CurInst) {
2717       /* CurInst dominates this predecessor. */
2718       NumWithout = 2;
2719       break;
2720     } else {
2721       predMap.push_back(std::make_pair(predV, P));
2722       ++NumWith;
2723     }
2724   }
2725 
2726   // Don't do PRE when it might increase code size, i.e. when
2727   // we would need to insert instructions in more than one pred.
2728   if (NumWithout > 1 || NumWith == 0)
2729     return false;
2730 
2731   // We may have a case where all predecessors have the instruction,
2732   // and we just need to insert a phi node. Otherwise, perform
2733   // insertion.
2734   Instruction *PREInstr = nullptr;
2735 
2736   if (NumWithout != 0) {
2737     if (!isSafeToSpeculativelyExecute(CurInst)) {
2738       // It is only valid to insert a new instruction if the current instruction
2739       // is always executed. An instruction with implicit control flow could
2740       // prevent us from doing it. If we cannot speculate the execution, then
2741       // PRE should be prohibited.
2742       if (ICF->isDominatedByICFIFromSameBlock(CurInst))
2743         return false;
2744     }
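    // (Illustrative example: if an earlier call in CurrentBlock may throw,
    // CurInst is not guaranteed to execute; inserting a non-speculatable
    // clone into PREPred would then run it on paths where the original
    // never does.)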
2745 
2746     // Don't do PRE across an indirect branch.
2747     if (isa<IndirectBrInst>(PREPred->getTerminator()))
2748       return false;
2749 
2750     // Don't do PRE across callbr.
2751     // FIXME: Can we do this across the fallthrough edge?
2752     if (isa<CallBrInst>(PREPred->getTerminator()))
2753       return false;
2754 
2755     // We can't do PRE safely on a critical edge, so instead we schedule
2756     // the edge to be split and perform the PRE the next time we iterate
2757     // on the function.
2758     unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
2759     if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
2760       toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
2761       return false;
2762     }
2763     // We need to insert somewhere, so let's give it a shot.
2764     PREInstr = CurInst->clone();
2765     if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
2766       // If we failed insertion, make sure we remove the instruction.
2767       LLVM_DEBUG(verifyRemoved(PREInstr));
2768       PREInstr->deleteValue();
2769       return false;
2770     }
2771   }
2772 
2773   // Either we should have filled in the PRE instruction, or we should
2774   // not have needed insertions.
2775   assert(PREInstr != nullptr || NumWithout == 0);
2776 
2777   ++NumGVNPRE;
2778 
2779   // Create a PHI to make the value available in this block.
2780   PHINode *Phi =
2781       PHINode::Create(CurInst->getType(), predMap.size(),
2782                       CurInst->getName() + ".pre-phi", &CurrentBlock->front());
2783   for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
2784     if (Value *V = predMap[i].first) {
2785       // If we use an existing value in this phi, we have to patch the original
2786       // value because the phi will be used to replace a later value.
2787       patchReplacementInstruction(CurInst, V);
2788       Phi->addIncoming(V, predMap[i].second);
2789     } else
2790       Phi->addIncoming(PREInstr, PREPred);
2791   }
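  // The join point now looks like, e.g. (illustrative names only):
  //   %v.pre-phi = phi i32 [ %v.pre, %PREPred ], [ %leader, %OtherPred ]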
2792 
2793   VN.add(Phi, ValNo);
2794   // After creating a new PHI for ValNo, the phi translate result for ValNo
2795   // will change, so erase the related stale entries in the phi translate cache.
2796   VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
2797   addToLeaderTable(ValNo, Phi, CurrentBlock);
2798   Phi->setDebugLoc(CurInst->getDebugLoc());
2799   CurInst->replaceAllUsesWith(Phi);
2800   if (MD && Phi->getType()->isPtrOrPtrVectorTy())
2801     MD->invalidateCachedPointerInfo(Phi);
2802   VN.erase(CurInst);
2803   removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
2804 
2805   LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
2806   if (MD)
2807     MD->removeInstruction(CurInst);
2808   if (MSSAU)
2809     MSSAU->removeMemoryAccess(CurInst);
2810   LLVM_DEBUG(verifyRemoved(CurInst));
2811   // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
2812   // some assertion failures.
2813   ICF->removeInstruction(CurInst);
2814   CurInst->eraseFromParent();
2815   ++NumGVNInstr;
2816 
2817   return true;
2818 }
2819 
2820 /// Perform a purely local form of PRE that looks for diamond
2821 /// control flow patterns and attempts to perform simple PRE at the join point.
2822 bool GVNPass::performPRE(Function &F) {
2823   bool Changed = false;
2824   for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
2825     // Nothing to PRE in the entry block.
2826     if (CurrentBlock == &F.getEntryBlock())
2827       continue;
2828 
2829     // Don't perform PRE on an EH pad.
2830     if (CurrentBlock->isEHPad())
2831       continue;
2832 
2833     for (BasicBlock::iterator BI = CurrentBlock->begin(),
2834                               BE = CurrentBlock->end();
2835          BI != BE;) {
2836       Instruction *CurInst = &*BI++;
2837       Changed |= performScalarPRE(CurInst);
2838     }
2839   }
2840 
2841   if (splitCriticalEdges())
2842     Changed = true;
2843 
2844   return Changed;
2845 }
2846 
2847 /// Split the critical edge connecting the given two blocks, and return
2848 /// the block inserted on the critical edge.
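/// (Reminder, not from the original source: a critical edge runs from a
/// block with multiple successors to a block with multiple predecessors, so
/// nothing can be placed on that edge without inserting a new block.)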
2849 BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
2850   // GVN does not require loop-simplify, do not try to preserve it if it is not
2851   // possible.
2852   BasicBlock *BB = SplitCriticalEdge(
2853       Pred, Succ,
2854       CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
2855   if (BB) {
2856     if (MD)
2857       MD->invalidateCachedPredecessors();
2858     InvalidBlockRPONumbers = true;
2859   }
2860   return BB;
2861 }
2862 
2863 /// Split critical edges found during the previous
2864 /// iteration that may enable further optimization.
2865 bool GVNPass::splitCriticalEdges() {
2866   if (toSplit.empty())
2867     return false;
2868 
2869   bool Changed = false;
2870   do {
2871     std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
2872     Changed |= SplitCriticalEdge(Edge.first, Edge.second,
2873                                  CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
2874                nullptr;
2875   } while (!toSplit.empty());
2876   if (Changed) {
2877     if (MD)
2878       MD->invalidateCachedPredecessors();
2879     InvalidBlockRPONumbers = true;
2880   }
2881   return Changed;
2882 }
2883 
2884 /// Executes one iteration of GVN.
2885 bool GVNPass::iterateOnFunction(Function &F) {
2886   cleanupGlobalSets();
2887 
2888   // Top-down walk of the function in reverse post-order.
2889   bool Changed = false;
2890   // Needed for value numbering with phi construction to work.
2891   // RPOT walks the graph in its constructor and will not be invalidated during
2892   // processBlock.
2893   ReversePostOrderTraversal<Function *> RPOT(&F);
2894 
2895   for (BasicBlock *BB : RPOT)
2896     Changed |= processBlock(BB);
2897 
2898   return Changed;
2899 }
2900 
2901 void GVNPass::cleanupGlobalSets() {
2902   VN.clear();
2903   LeaderTable.clear();
2904   BlockRPONumber.clear();
2905   TableAllocator.Reset();
2906   ICF->clear();
2907   InvalidBlockRPONumbers = true;
2908 }
2909 
2910 /// Verify that the specified instruction does not occur in our
2911 /// internal data structures.
2912 void GVNPass::verifyRemoved(const Instruction *Inst) const {
2913   VN.verifyRemoved(Inst);
2914 
2915   // Walk through the value number scope to make sure the instruction isn't
2916   // ferreted away in it.
2917   for (const auto &I : LeaderTable) {
2918     const LeaderTableEntry *Node = &I.second;
2919     assert(Node->Val != Inst && "Inst still in value numbering scope!");
2920 
2921     while (Node->Next) {
2922       Node = Node->Next;
2923       assert(Node->Val != Inst && "Inst still in value numbering scope!");
2924     }
2925   }
2926 }
2927 
2928 /// BB is declared dead, which implies that other blocks become dead as well.
2929 /// This function adds all such blocks to "DeadBlocks". For the dead blocks'
2930 /// live successors, update their phi nodes by replacing the operands
2931 /// corresponding to dead blocks with UndefVal.
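/// An illustrative sketch (not from the original source):
///
///        entry
///        /   \
///      BB     C
///      |      |
///      D      |
///       \    /
///        join
///
/// Declaring BB dead also kills D (dominated by BB); join stays live and
/// lies on dominance-frontier(BB), so its phi operand incoming from D is
/// replaced with undef.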
2932 void GVNPass::addDeadBlock(BasicBlock *BB) {
2933   SmallVector<BasicBlock *, 4> NewDead;
2934   SmallSetVector<BasicBlock *, 4> DF;
2935 
2936   NewDead.push_back(BB);
2937   while (!NewDead.empty()) {
2938     BasicBlock *D = NewDead.pop_back_val();
2939     if (DeadBlocks.count(D))
2940       continue;
2941 
2942     // All blocks dominated by D are dead.
2943     SmallVector<BasicBlock *, 8> Dom;
2944     DT->getDescendants(D, Dom);
2945     DeadBlocks.insert(Dom.begin(), Dom.end());
2946 
2947     // Figure out the dominance-frontier(D).
2948     for (BasicBlock *B : Dom) {
2949       for (BasicBlock *S : successors(B)) {
2950         if (DeadBlocks.count(S))
2951           continue;
2952 
2953         bool AllPredDead = true;
2954         for (BasicBlock *P : predecessors(S))
2955           if (!DeadBlocks.count(P)) {
2956             AllPredDead = false;
2957             break;
2958           }
2959 
2960         if (!AllPredDead) {
2961           // S could be proved dead later on. That is why we don't update phi
2962           // operands at this moment.
2963           DF.insert(S);
2964         } else {
2965           // While S is not dominated by D, it is dead by now. This can
2966           // happen if S already had a dead predecessor before D was declared
2967           // dead.
2968           NewDead.push_back(S);
2969         }
2970       }
2971     }
2972   }
2973 
2974   // For the dead blocks' live successors, update their phi nodes by replacing
2975   // the operands corresponding to dead blocks with UndefVal.
2976   for (BasicBlock *B : DF) {
2977     if (DeadBlocks.count(B))
2978       continue;
2979 
2980     // First, split the critical edges. This might also create additional blocks
2981     // to preserve LoopSimplify form and adjust edges accordingly.
2982     SmallVector<BasicBlock *, 4> Preds(predecessors(B));
2983     for (BasicBlock *P : Preds) {
2984       if (!DeadBlocks.count(P))
2985         continue;
2986 
2987       if (llvm::is_contained(successors(P), B) &&
2988           isCriticalEdge(P->getTerminator(), B)) {
2989         if (BasicBlock *S = splitCriticalEdges(P, B))
2990           DeadBlocks.insert(P = S);
2991       }
2992     }
2993 
2994     // Now undef the incoming values from the dead predecessors.
2995     for (BasicBlock *P : predecessors(B)) {
2996       if (!DeadBlocks.count(P))
2997         continue;
2998       for (PHINode &Phi : B->phis()) {
2999         Phi.setIncomingValueForBlock(P, UndefValue::get(Phi.getType()));
3000         if (MD)
3001           MD->invalidateCachedPointerInfo(&Phi);
3002       }
3003     }
3004   }
3005 }
3006 
3007 // If the given branch is recognized as a foldable branch (i.e. a conditional
3008 // branch with a constant condition), perform the following analyses and
3009 // transformations:
3010 //  1) If the dead outgoing edge is a critical edge, split it. Let R be the
3011 //     target of the dead outgoing edge.
3012 //  2) Identify the set of dead blocks implied by the branch's dead outgoing
3013 //     edge. The result of this step will be {X | X is dominated by R}.
3014 //  3) Identify those blocks which have at least one dead predecessor. The
3015 //     result of this step will be dominance-frontier(R).
3016 //  4) Update the PHIs in DF(R) by replacing the operands corresponding to dead
3017 //     blocks with "UndefVal" in the hope that these PHIs will be optimized away.
3018 //
3019 // Return true iff *NEW* dead code is found.
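// For example (illustrative IR, not from the original source): given
//   br i1 true, label %live, label %dead
// the edge to %dead is the dead outgoing edge; %dead becomes the root R,
// every block dominated by it is marked dead, and the PHIs on DF(R) get
// undef for their incoming values from dead blocks.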
3020 bool GVNPass::processFoldableCondBr(BranchInst *BI) {
3021   if (!BI || BI->isUnconditional())
3022     return false;
3023 
3024   // If a branch has two identical successors, we cannot declare either dead.
3025   if (BI->getSuccessor(0) == BI->getSuccessor(1))
3026     return false;
3027 
3028   ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
3029   if (!Cond)
3030     return false;
3031 
3032   BasicBlock *DeadRoot =
3033       Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
3034   if (DeadBlocks.count(DeadRoot))
3035     return false;
3036 
3037   if (!DeadRoot->getSinglePredecessor())
3038     DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
3039 
3040   addDeadBlock(DeadRoot);
3041   return true;
3042 }
3043 
3044 // performPRE() will trigger an assert if it comes across an instruction
3045 // without an associated val-num. As a function normally has far more live
3046 // instructions than dead ones, it makes more sense just to "fabricate" a
3047 // val-number for the dead code than to check whether each instruction is dead.
3048 void GVNPass::assignValNumForDeadCode() {
3049   for (BasicBlock *BB : DeadBlocks) {
3050     for (Instruction &Inst : *BB) {
3051       unsigned ValNum = VN.lookupOrAdd(&Inst);
3052       addToLeaderTable(ValNum, &Inst, BB);
3053     }
3054   }
3055 }
3056 
3057 class llvm::gvn::GVNLegacyPass : public FunctionPass {
3058 public:
3059   static char ID; // Pass identification, replacement for typeid
3060 
3061   explicit GVNLegacyPass(bool NoMemDepAnalysis = !GVNEnableMemDep)
3062       : FunctionPass(ID), Impl(GVNOptions().setMemDep(!NoMemDepAnalysis)) {
3063     initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
3064   }
3065 
3066   bool runOnFunction(Function &F) override {
3067     if (skipFunction(F))
3068       return false;
3069 
3070     auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
3071 
3072     auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
3073     return Impl.runImpl(
3074         F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
3075         getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
3076         getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
3077         getAnalysis<AAResultsWrapperPass>().getAAResults(),
3078         Impl.isMemDepEnabled()
3079             ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
3080             : nullptr,
3081         LIWP ? &LIWP->getLoopInfo() : nullptr,
3082         &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
3083         MSSAWP ? &MSSAWP->getMSSA() : nullptr);
3084   }
3085 
3086   void getAnalysisUsage(AnalysisUsage &AU) const override {
3087     AU.addRequired<AssumptionCacheTracker>();
3088     AU.addRequired<DominatorTreeWrapperPass>();
3089     AU.addRequired<TargetLibraryInfoWrapperPass>();
3090     AU.addRequired<LoopInfoWrapperPass>();
3091     if (Impl.isMemDepEnabled())
3092       AU.addRequired<MemoryDependenceWrapperPass>();
3093     AU.addRequired<AAResultsWrapperPass>();
3094     AU.addPreserved<DominatorTreeWrapperPass>();
3095     AU.addPreserved<GlobalsAAWrapperPass>();
3096     AU.addPreserved<TargetLibraryInfoWrapperPass>();
3097     AU.addPreserved<LoopInfoWrapperPass>();
3098     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
3099     AU.addPreserved<MemorySSAWrapperPass>();
3100   }
3101 
3102 private:
3103   GVNPass Impl;
3104 };
3105 
3106 char GVNLegacyPass::ID = 0;
3107 
3108 INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
3109 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
3110 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
3111 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
3112 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3113 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
3114 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
3115 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
3116 INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
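// Usage note (an assumption about typical invocation, not from the original
// source): with the legacy pass manager this registration makes the pass
// runnable as `opt -gvn`, while the new pass manager spells it
// `opt -passes=gvn`.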
3117 
3118 // The public interface to this file...
3119 FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
3120   return new GVNLegacyPass(NoMemDepAnalysis);
3121 }
3122