xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/Utils/SCCPSolver.cpp (revision 770cf0a5f02dc8983a89c6568d741fbc25baa999)
1 //===- SCCPSolver.cpp - SCCP Utility --------------------------- *- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // \file
10 // This file implements the Sparse Conditional Constant Propagation (SCCP)
11 // utility.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Utils/SCCPSolver.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/Analysis/ConstantFolding.h"
18 #include "llvm/Analysis/InstructionSimplify.h"
19 #include "llvm/Analysis/ValueLattice.h"
20 #include "llvm/Analysis/ValueLatticeUtils.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/IR/InstVisitor.h"
23 #include "llvm/IR/PatternMatch.h"
24 #include "llvm/Support/Casting.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/Transforms/Utils/Local.h"
29 #include <cassert>
30 #include <utility>
31 #include <vector>
32 
33 using namespace llvm;
34 using namespace PatternMatch;
35 
36 #define DEBUG_TYPE "sccp"
37 
38 // The maximum number of range extensions allowed for operations requiring
39 // widening.
40 static const unsigned MaxNumRangeExtensions = 10;
41 
42 /// Returns MergeOptions with MaxWidenSteps set to MaxNumRangeExtensions.
43 static ValueLatticeElement::MergeOptions getMaxWidenStepsOpts() {
44   return ValueLatticeElement::MergeOptions().setMaxWidenSteps(
45       MaxNumRangeExtensions);
46 }
47 
48 namespace llvm {
49 
50 bool SCCPSolver::isConstant(const ValueLatticeElement &LV) {
51   return LV.isConstant() ||
52          (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
53 }
54 
55 bool SCCPSolver::isOverdefined(const ValueLatticeElement &LV) {
56   return !LV.isUnknownOrUndef() && !SCCPSolver::isConstant(LV);
57 }
58 
/// If the lattice state of \p V has resolved to a constant, replace all of
/// \p V's uses with that constant. Returns true if a replacement happened.
bool SCCPSolver::tryToReplaceWithConstant(Value *V) {
  Constant *Const = getConstantOrNull(V);
  if (!Const)
    return false;
  // Replacing `musttail` instructions with constant breaks `musttail` invariant
  // unless the call itself can be removed.
  // Calls with "clang.arc.attachedcall" implicitly use the return value and
  // those uses cannot be updated with a constant.
  CallBase *CB = dyn_cast<CallBase>(V);
  if (CB && ((CB->isMustTailCall() && !wouldInstructionBeTriviallyDead(CB)) ||
             CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall))) {
    Function *F = CB->getCalledFunction();

    // Don't zap returns of the callee
    if (F)
      addToMustPreserveReturnsInFunctions(F);

    LLVM_DEBUG(dbgs() << "  Can\'t treat the result of call " << *CB
                      << " as a constant\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "  Constant: " << *Const << " = " << *V << '\n');

  // Replaces all of the uses of a variable with uses of the constant.
  V->replaceAllUsesWith(Const);
  return true;
}
87 
88 /// Helper for getting ranges from \p Solver. Instructions inserted during
89 /// simplification are unavailable in the solver, so we return a full range for
90 /// them.
91 static ConstantRange getRange(Value *Op, SCCPSolver &Solver,
92                               const SmallPtrSetImpl<Value *> &InsertedValues) {
93   if (auto *Const = dyn_cast<Constant>(Op))
94     return Const->toConstantRange();
95   if (InsertedValues.contains(Op)) {
96     unsigned Bitwidth = Op->getType()->getScalarSizeInBits();
97     return ConstantRange::getFull(Bitwidth);
98   }
99   return Solver.getLatticeValueFor(Op).asConstantRange(Op->getType(),
100                                                        /*UndefAllowed=*/false);
101 }
102 
/// Try to use \p Inst's value range from \p Solver to infer poison-generating
/// flags: nuw/nsw on overflowing binops and truncs, nneg on zext/uitofp, and
/// nuw on GEPs that already have nusw. Returns true if any flag was added.
static bool refineInstruction(SCCPSolver &Solver,
                              const SmallPtrSetImpl<Value *> &InsertedValues,
                              Instruction &Inst) {
  bool Changed = false;
  auto GetRange = [&Solver, &InsertedValues](Value *Op) {
    return getRange(Op, Solver, InsertedValues);
  };

  if (isa<OverflowingBinaryOperator>(Inst)) {
    // Both flags already present; nothing left to infer.
    if (Inst.hasNoSignedWrap() && Inst.hasNoUnsignedWrap())
      return false;

    auto RangeA = GetRange(Inst.getOperand(0));
    auto RangeB = GetRange(Inst.getOperand(1));
    if (!Inst.hasNoUnsignedWrap()) {
      // If every value of RangeA is guaranteed not to wrap against RangeB,
      // the operation can carry the nuw flag.
      auto NUWRange = ConstantRange::makeGuaranteedNoWrapRegion(
          Instruction::BinaryOps(Inst.getOpcode()), RangeB,
          OverflowingBinaryOperator::NoUnsignedWrap);
      if (NUWRange.contains(RangeA)) {
        Inst.setHasNoUnsignedWrap();
        Changed = true;
      }
    }
    if (!Inst.hasNoSignedWrap()) {
      auto NSWRange = ConstantRange::makeGuaranteedNoWrapRegion(
          Instruction::BinaryOps(Inst.getOpcode()), RangeB,
          OverflowingBinaryOperator::NoSignedWrap);
      if (NSWRange.contains(RangeA)) {
        Inst.setHasNoSignedWrap();
        Changed = true;
      }
    }
  } else if (isa<PossiblyNonNegInst>(Inst) && !Inst.hasNonNeg()) {
    // Operand provably non-negative: zext/uitofp may be marked nneg.
    auto Range = GetRange(Inst.getOperand(0));
    if (Range.isAllNonNegative()) {
      Inst.setNonNeg();
      Changed = true;
    }
  } else if (TruncInst *TI = dyn_cast<TruncInst>(&Inst)) {
    if (TI->hasNoSignedWrap() && TI->hasNoUnsignedWrap())
      return false;

    auto Range = GetRange(Inst.getOperand(0));
    uint64_t DestWidth = TI->getDestTy()->getScalarSizeInBits();
    if (!TI->hasNoUnsignedWrap()) {
      // All set bits fit in the destination: no unsigned bits are dropped.
      if (Range.getActiveBits() <= DestWidth) {
        TI->setHasNoUnsignedWrap(true);
        Changed = true;
      }
    }
    if (!TI->hasNoSignedWrap()) {
      // The signed value is representable in DestWidth bits.
      if (Range.getMinSignedBits() <= DestWidth) {
        TI->setHasNoSignedWrap(true);
        Changed = true;
      }
    }
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&Inst)) {
    // Only upgrade nusw GEPs that do not yet carry nuw.
    if (GEP->hasNoUnsignedWrap() || !GEP->hasNoUnsignedSignedWrap())
      return false;

    // nusw + all-non-negative indices implies nuw.
    if (all_of(GEP->indices(),
               [&](Value *V) { return GetRange(V).isAllNonNegative(); })) {
      GEP->setNoWrapFlags(GEP->getNoWrapFlags() |
                          GEPNoWrapFlags::noUnsignedWrap());
      Changed = true;
    }
  }

  return Changed;
}
174 
/// Try to replace signed instructions with their unsigned equivalent
/// (sext->zext, sitofp->uitofp, ashr->lshr, sdiv/srem->udiv/urem) when the
/// relevant operands are provably non-negative. On success the original
/// instruction is erased and true is returned.
static bool replaceSignedInst(SCCPSolver &Solver,
                              SmallPtrSetImpl<Value *> &InsertedValues,
                              Instruction &Inst) {
  // Determine if a signed value is known to be >= 0.
  auto isNonNegative = [&Solver, &InsertedValues](Value *V) {
    return getRange(V, Solver, InsertedValues).isAllNonNegative();
  };

  Instruction *NewInst = nullptr;
  switch (Inst.getOpcode()) {
  case Instruction::SIToFP:
  case Instruction::SExt: {
    // If the source value is not negative, this is a zext/uitofp.
    Value *Op0 = Inst.getOperand(0);
    if (!isNonNegative(Op0))
      return false;
    NewInst = CastInst::Create(Inst.getOpcode() == Instruction::SExt
                                   ? Instruction::ZExt
                                   : Instruction::UIToFP,
                               Op0, Inst.getType(), "", Inst.getIterator());
    NewInst->setNonNeg();
    break;
  }
  case Instruction::AShr: {
    // If the shifted value is not negative, this is a logical shift right.
    Value *Op0 = Inst.getOperand(0);
    if (!isNonNegative(Op0))
      return false;
    NewInst = BinaryOperator::CreateLShr(Op0, Inst.getOperand(1), "", Inst.getIterator());
    NewInst->setIsExact(Inst.isExact());
    break;
  }
  case Instruction::SDiv:
  case Instruction::SRem: {
    // If both operands are not negative, this is the same as udiv/urem.
    Value *Op0 = Inst.getOperand(0), *Op1 = Inst.getOperand(1);
    if (!isNonNegative(Op0) || !isNonNegative(Op1))
      return false;
    auto NewOpcode = Inst.getOpcode() == Instruction::SDiv ? Instruction::UDiv
                                                           : Instruction::URem;
    NewInst = BinaryOperator::Create(NewOpcode, Op0, Op1, "", Inst.getIterator());
    if (Inst.getOpcode() == Instruction::SDiv)
      NewInst->setIsExact(Inst.isExact());
    break;
  }
  default:
    return false;
  }

  // Wire up the new instruction and update state.
  // Note: the replacement is recorded in InsertedValues so later range
  // queries treat it conservatively (it has no lattice entry).
  assert(NewInst && "Expected replacement instruction");
  NewInst->takeName(&Inst);
  InsertedValues.insert(NewInst);
  Inst.replaceAllUsesWith(NewInst);
  NewInst->setDebugLoc(Inst.getDebugLoc());
  Solver.removeLatticeValueFor(&Inst);
  Inst.eraseFromParent();
  return true;
}
235 
236 /// Try to use \p Inst's value range from \p Solver to simplify it.
237 static Value *simplifyInstruction(SCCPSolver &Solver,
238                                   SmallPtrSetImpl<Value *> &InsertedValues,
239                                   Instruction &Inst) {
240   auto GetRange = [&Solver, &InsertedValues](Value *Op) {
241     return getRange(Op, Solver, InsertedValues);
242   };
243 
244   Value *X;
245   const APInt *RHSC;
246   // Remove masking operations.
247   if (match(&Inst, m_And(m_Value(X), m_LowBitMask(RHSC)))) {
248     ConstantRange LRange = GetRange(Inst.getOperand(0));
249     if (LRange.getUnsignedMax().ule(*RHSC))
250       return X;
251   }
252 
253   return nullptr;
254 }
255 
/// Apply SCCP-derived simplifications to every value-producing instruction in
/// \p BB: constant replacement first, then signed->unsigned rewrites, then
/// flag refinement, then range-based simplification. The transforms are
/// mutually exclusive per instruction and tried in that priority order.
/// Returns true if anything changed; statistics are updated via the two
/// Statistic out-parameters.
bool SCCPSolver::simplifyInstsInBlock(BasicBlock &BB,
                                      SmallPtrSetImpl<Value *> &InsertedValues,
                                      Statistic &InstRemovedStat,
                                      Statistic &InstReplacedStat) {
  bool MadeChanges = false;
  for (Instruction &Inst : make_early_inc_range(BB)) {
    // Void instructions produce no value to fold.
    if (Inst.getType()->isVoidTy())
      continue;
    if (tryToReplaceWithConstant(&Inst)) {
      // Uses were replaced; drop the instruction itself if it is now dead.
      if (wouldInstructionBeTriviallyDead(&Inst))
        Inst.eraseFromParent();

      MadeChanges = true;
      ++InstRemovedStat;
    } else if (replaceSignedInst(*this, InsertedValues, Inst)) {
      MadeChanges = true;
      ++InstReplacedStat;
    } else if (refineInstruction(*this, InsertedValues, Inst)) {
      MadeChanges = true;
    } else if (auto *V = simplifyInstruction(*this, InsertedValues, Inst)) {
      Inst.replaceAllUsesWith(V);
      Inst.eraseFromParent();
      ++InstRemovedStat;
      MadeChanges = true;
    }
  }
  return MadeChanges;
}
284 
/// Remove CFG edges out of \p BB that the solver proved infeasible, keeping
/// \p DTU up to date. \p NewUnreachableBB caches a shared "default
/// unreachable" block across calls so multiple switches can reuse it.
/// Returns true if the terminator was modified.
bool SCCPSolver::removeNonFeasibleEdges(BasicBlock *BB, DomTreeUpdater &DTU,
                                        BasicBlock *&NewUnreachableBB) const {
  SmallPtrSet<BasicBlock *, 8> FeasibleSuccessors;
  bool HasNonFeasibleEdges = false;
  for (BasicBlock *Succ : successors(BB)) {
    if (isEdgeFeasible(BB, Succ))
      FeasibleSuccessors.insert(Succ);
    else
      HasNonFeasibleEdges = true;
  }

  // All edges feasible, nothing to do.
  if (!HasNonFeasibleEdges)
    return false;

  // SCCP can only determine non-feasible edges for br, switch and indirectbr.
  Instruction *TI = BB->getTerminator();
  assert((isa<BranchInst>(TI) || isa<SwitchInst>(TI) ||
          isa<IndirectBrInst>(TI)) &&
         "Terminator must be a br, switch or indirectbr");

  if (FeasibleSuccessors.size() == 0) {
    // Branch on undef/poison, replace with unreachable.
    // SeenSuccs deduplicates multi-edges so DTU only sees one delete per
    // distinct successor.
    SmallPtrSet<BasicBlock *, 8> SeenSuccs;
    SmallVector<DominatorTree::UpdateType, 8> Updates;
    for (BasicBlock *Succ : successors(BB)) {
      Succ->removePredecessor(BB);
      if (SeenSuccs.insert(Succ).second)
        Updates.push_back({DominatorTree::Delete, BB, Succ});
    }
    TI->eraseFromParent();
    new UnreachableInst(BB->getContext(), BB);
    DTU.applyUpdatesPermissive(Updates);
  } else if (FeasibleSuccessors.size() == 1) {
    // Replace with an unconditional branch to the only feasible successor.
    BasicBlock *OnlyFeasibleSuccessor = *FeasibleSuccessors.begin();
    SmallVector<DominatorTree::UpdateType, 8> Updates;
    bool HaveSeenOnlyFeasibleSuccessor = false;
    for (BasicBlock *Succ : successors(BB)) {
      if (Succ == OnlyFeasibleSuccessor && !HaveSeenOnlyFeasibleSuccessor) {
        // Don't remove the edge to the only feasible successor the first time
        // we see it. We still do need to remove any multi-edges to it though.
        HaveSeenOnlyFeasibleSuccessor = true;
        continue;
      }

      Succ->removePredecessor(BB);
      Updates.push_back({DominatorTree::Delete, BB, Succ});
    }

    Instruction *BI = BranchInst::Create(OnlyFeasibleSuccessor, BB);
    BI->setDebugLoc(TI->getDebugLoc());
    TI->eraseFromParent();
    DTU.applyUpdatesPermissive(Updates);
  } else if (FeasibleSuccessors.size() > 1) {
    // Multiple feasible successors: only a switch can reach here, and we
    // prune its infeasible cases / default in place.
    SwitchInstProfUpdateWrapper SI(*cast<SwitchInst>(TI));
    SmallVector<DominatorTree::UpdateType, 8> Updates;

    // If the default destination is unfeasible it will never be taken. Replace
    // it with a new block with a single Unreachable instruction.
    BasicBlock *DefaultDest = SI->getDefaultDest();
    if (!FeasibleSuccessors.contains(DefaultDest)) {
      if (!NewUnreachableBB) {
        NewUnreachableBB =
            BasicBlock::Create(DefaultDest->getContext(), "default.unreachable",
                               DefaultDest->getParent(), DefaultDest);
        auto *UI =
            new UnreachableInst(DefaultDest->getContext(), NewUnreachableBB);
        UI->setDebugLoc(DebugLoc::getTemporary());
      }

      DefaultDest->removePredecessor(BB);
      SI->setDefaultDest(NewUnreachableBB);
      Updates.push_back({DominatorTree::Delete, BB, DefaultDest});
      Updates.push_back({DominatorTree::Insert, BB, NewUnreachableBB});
    }

    for (auto CI = SI->case_begin(); CI != SI->case_end();) {
      if (FeasibleSuccessors.contains(CI->getCaseSuccessor())) {
        ++CI;
        continue;
      }

      BasicBlock *Succ = CI->getCaseSuccessor();
      Succ->removePredecessor(BB);
      Updates.push_back({DominatorTree::Delete, BB, Succ});
      SI.removeCase(CI);
      // Don't increment CI, as we removed a case.
    }

    DTU.applyUpdatesPermissive(Updates);
  } else {
    llvm_unreachable("Must have at least one feasible successor");
  }
  return true;
}
381 
/// Attach attributes implied by lattice value \p Val to \p F at \p AttrIndex
/// (either the return index or an argument index): a 'range' attribute for
/// non-singleton constant ranges, or 'nonnull' for pointers known != null.
static void inferAttribute(Function *F, unsigned AttrIndex,
                           const ValueLatticeElement &Val) {
  // If there is a known constant range for the value, add range attribute.
  if (Val.isConstantRange() && !Val.getConstantRange().isSingleElement()) {
    // Do not add range attribute if the value may include undef.
    if (Val.isConstantRangeIncludingUndef())
      return;

    // Take the intersection of the existing attribute and the inferred range.
    Attribute OldAttr = F->getAttributeAtIndex(AttrIndex, Attribute::Range);
    ConstantRange CR = Val.getConstantRange();
    if (OldAttr.isValid())
      CR = CR.intersectWith(OldAttr.getRange());
    F->addAttributeAtIndex(
        AttrIndex, Attribute::get(F->getContext(), Attribute::Range, CR));
    return;
  }
  // Infer nonnull attribute.
  if (Val.isNotConstant() && Val.getNotConstant()->getType()->isPointerTy() &&
      Val.getNotConstant()->isNullValue() &&
      !F->hasAttributeAtIndex(AttrIndex, Attribute::NonNull)) {
    F->addAttributeAtIndex(AttrIndex,
                           Attribute::get(F->getContext(), Attribute::NonNull));
  }
}
407 
408 void SCCPSolver::inferReturnAttributes() const {
409   for (const auto &[F, ReturnValue] : getTrackedRetVals())
410     inferAttribute(F, AttributeList::ReturnIndex, ReturnValue);
411 }
412 
413 void SCCPSolver::inferArgAttributes() const {
414   for (Function *F : getArgumentTrackedFunctions()) {
415     if (!isBlockExecutable(&F->front()))
416       continue;
417     for (Argument &A : F->args())
418       if (!A.getType()->isStructTy())
419         inferAttribute(F, AttributeList::FirstArgIndex + A.getArgNo(),
420                        getLatticeValueFor(&A));
421   }
422 }
423 
424 /// Helper class for SCCPSolver. This implements the instruction visitor and
425 /// holds all the state.
426 class SCCPInstVisitor : public InstVisitor<SCCPInstVisitor> {
427   const DataLayout &DL;
428   std::function<const TargetLibraryInfo &(Function &)> GetTLI;
429   /// Basic blocks that are executable (but may not have been visited yet).
430   SmallPtrSet<BasicBlock *, 8> BBExecutable;
431   /// Basic blocks that are executable and have been visited at least once.
432   SmallPtrSet<BasicBlock *, 8> BBVisited;
433   DenseMap<Value *, ValueLatticeElement>
434       ValueState; // The state each value is in.
435 
436   /// StructValueState - This maintains ValueState for values that have
437   /// StructType, for example for formal arguments, calls, insertelement, etc.
438   DenseMap<std::pair<Value *, unsigned>, ValueLatticeElement> StructValueState;
439 
440   /// GlobalValue - If we are tracking any values for the contents of a global
441   /// variable, we keep a mapping from the constant accessor to the element of
442   /// the global, to the currently known value.  If the value becomes
443   /// overdefined, it's entry is simply removed from this map.
444   DenseMap<GlobalVariable *, ValueLatticeElement> TrackedGlobals;
445 
446   /// TrackedRetVals - If we are tracking arguments into and the return
447   /// value out of a function, it will have an entry in this map, indicating
448   /// what the known return value for the function is.
449   MapVector<Function *, ValueLatticeElement> TrackedRetVals;
450 
451   /// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions
452   /// that return multiple values.
453   MapVector<std::pair<Function *, unsigned>, ValueLatticeElement>
454       TrackedMultipleRetVals;
455 
456   /// The set of values whose lattice has been invalidated.
457   /// Populated by resetLatticeValueFor(), cleared after resolving undefs.
458   DenseSet<Value *> Invalidated;
459 
460   /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is
461   /// represented here for efficient lookup.
462   SmallPtrSet<Function *, 16> MRVFunctionsTracked;
463 
464   /// A list of functions whose return cannot be modified.
465   SmallPtrSet<Function *, 16> MustPreserveReturnsInFunctions;
466 
467   /// TrackingIncomingArguments - This is the set of functions for whose
468   /// arguments we make optimistic assumptions about and try to prove as
469   /// constants.
470   SmallPtrSet<Function *, 16> TrackingIncomingArguments;
471 
472   /// Worklist of instructions to re-visit. This only includes instructions
473   /// in blocks that have already been visited at least once.
474   SmallSetVector<Instruction *, 16> InstWorkList;
475 
476   /// Current instruction while visiting a block for the first time, used to
477   /// avoid unnecessary instruction worklist insertions. Null if an instruction
478   /// is visited outside a whole-block visitation.
479   Instruction *CurI = nullptr;
480 
481   // The BasicBlock work list
482   SmallVector<BasicBlock *, 64> BBWorkList;
483 
484   /// KnownFeasibleEdges - Entries in this set are edges which have already had
485   /// PHI nodes retriggered.
486   using Edge = std::pair<BasicBlock *, BasicBlock *>;
487   DenseSet<Edge> KnownFeasibleEdges;
488 
489   DenseMap<Function *, std::unique_ptr<PredicateInfo>> FnPredicateInfo;
490 
491   DenseMap<Value *, SmallSetVector<User *, 2>> AdditionalUsers;
492 
493   LLVMContext &Ctx;
494 
495   BumpPtrAllocator PredicateInfoAllocator;
496 
497 private:
  /// Return \p IV as a ConstantInt of type \p Ty, or null if it does not
  /// resolve to a constant integer.
  ConstantInt *getConstantInt(const ValueLatticeElement &IV, Type *Ty) const {
    return dyn_cast_or_null<ConstantInt>(getConstant(IV, Ty));
  }
501 
502   /// Push instruction \p I to the worklist.
503   void pushToWorkList(Instruction *I);
504 
505   /// Push users of value \p V to the worklist.
506   void pushUsersToWorkList(Value *V);
507 
508   /// Like pushUsersToWorkList(), but also prints a debug message with the
509   /// updated value.
510   void pushUsersToWorkListMsg(ValueLatticeElement &IV, Value *V);
511 
512   // markConstant - Make a value be marked as "constant".  If the value
513   // is not already a constant, add it to the instruction work list so that
514   // the users of the instruction are updated later.
515   bool markConstant(ValueLatticeElement &IV, Value *V, Constant *C,
516                     bool MayIncludeUndef = false);
517 
  /// Convenience overload: mark non-struct value \p V constant \p C using its
  /// entry in ValueState.
  bool markConstant(Value *V, Constant *C) {
    assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
    return markConstant(ValueState[V], V, C);
  }
522 
523   bool markNotConstant(ValueLatticeElement &IV, Value *V, Constant *C);
524 
  /// Mark \p V as known to be != null (i.e. not-constant w.r.t. the null
  /// value of its type).
  bool markNotNull(ValueLatticeElement &IV, Value *V) {
    return markNotConstant(IV, V, Constant::getNullValue(V->getType()));
  }
528 
529   /// markConstantRange - Mark the object as constant range with \p CR. If the
530   /// object is not a constant range with the range \p CR, add it to the
531   /// instruction work list so that the users of the instruction are updated
532   /// later.
533   bool markConstantRange(ValueLatticeElement &IV, Value *V,
534                          const ConstantRange &CR);
535 
536   // markOverdefined - Make a value be marked as "overdefined". If the
537   // value is not already overdefined, add it to the overdefined instruction
538   // work list so that the users of the instruction are updated later.
539   bool markOverdefined(ValueLatticeElement &IV, Value *V);
540 
541   /// Merge \p MergeWithV into \p IV and push \p V to the worklist, if \p IV
542   /// changes.
543   bool mergeInValue(ValueLatticeElement &IV, Value *V,
544                     ValueLatticeElement MergeWithV,
545                     ValueLatticeElement::MergeOptions Opts = {
546                         /*MayIncludeUndef=*/false, /*CheckWiden=*/false});
547 
  /// Convenience overload: merge \p MergeWithV into non-struct value \p V's
  /// ValueState entry.
  // NOTE(review): the assert message reads "non-structs should use
  // markConstant" while the condition rejects structs — message looks
  // inverted; confirm against upstream before changing.
  bool mergeInValue(Value *V, ValueLatticeElement MergeWithV,
                    ValueLatticeElement::MergeOptions Opts = {
                        /*MayIncludeUndef=*/false, /*CheckWiden=*/false}) {
    assert(!V->getType()->isStructTy() &&
           "non-structs should use markConstant");
    return mergeInValue(ValueState[V], V, MergeWithV, Opts);
  }
555 
556   /// getValueState - Return the ValueLatticeElement object that corresponds to
557   /// the value.  This function handles the case when the value hasn't been seen
558   /// yet by properly seeding constants etc.
559   ValueLatticeElement &getValueState(Value *V) {
560     assert(!V->getType()->isStructTy() && "Should use getStructValueState");
561 
562     auto I = ValueState.try_emplace(V);
563     ValueLatticeElement &LV = I.first->second;
564 
565     if (!I.second)
566       return LV; // Common case, already in the map.
567 
568     if (auto *C = dyn_cast<Constant>(V))
569       LV.markConstant(C); // Constants are constant
570 
571     // All others are unknown by default.
572     return LV;
573   }
574 
  /// getStructValueState - Return the ValueLatticeElement object that
  /// corresponds to the value/field pair.  This function handles the case when
  /// the value hasn't been seen yet by properly seeding constants etc.
  ValueLatticeElement &getStructValueState(Value *V, unsigned i) {
    assert(V->getType()->isStructTy() && "Should use getValueState");
    assert(i < cast<StructType>(V->getType())->getNumElements() &&
           "Invalid element #");

    auto I = StructValueState.insert(
        std::make_pair(std::make_pair(V, i), ValueLatticeElement()));
    ValueLatticeElement &LV = I.first->second;

    if (!I.second)
      return LV; // Common case, already in the map.

    if (auto *C = dyn_cast<Constant>(V)) {
      // Seed element i from the constant aggregate when possible.
      Constant *Elt = C->getAggregateElement(i);

      if (!Elt)
        LV.markOverdefined(); // Unknown sort of constant.
      else
        LV.markConstant(Elt); // Constants are constant.
    }

    // All others are underdefined by default.
    return LV;
  }
602 
  /// Traverse the use-def chain of \p Call, marking itself and its users as
  /// "unknown" on the way. Return instructions reset the tracked-returns
  /// entries of their function; everything else resets its entry in the value
  /// (or struct-value) map. Transitive users, including AdditionalUsers, are
  /// queued for invalidation too.
  void invalidate(CallBase *Call) {
    SmallVector<Instruction *, 64> ToInvalidate;
    ToInvalidate.push_back(Call);

    while (!ToInvalidate.empty()) {
      Instruction *Inst = ToInvalidate.pop_back_val();

      // Each instruction is invalidated at most once.
      if (!Invalidated.insert(Inst).second)
        continue;

      // Instructions in unreachable blocks have no lattice state to reset.
      if (!BBExecutable.count(Inst->getParent()))
        continue;

      Value *V = nullptr;
      // For return instructions we need to invalidate the tracked returns map.
      // Anything else has its lattice in the value map.
      if (auto *RetInst = dyn_cast<ReturnInst>(Inst)) {
        Function *F = RetInst->getParent()->getParent();
        if (auto It = TrackedRetVals.find(F); It != TrackedRetVals.end()) {
          It->second = ValueLatticeElement();
          V = F;
        } else if (MRVFunctionsTracked.count(F)) {
          // Multi-value (struct) returns: reset every element's entry.
          auto *STy = cast<StructType>(F->getReturnType());
          for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
            TrackedMultipleRetVals[{F, I}] = ValueLatticeElement();
          V = F;
        }
      } else if (auto *STy = dyn_cast<StructType>(Inst->getType())) {
        for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
          if (auto It = StructValueState.find({Inst, I});
              It != StructValueState.end()) {
            It->second = ValueLatticeElement();
            V = Inst;
          }
        }
      } else if (auto It = ValueState.find(Inst); It != ValueState.end()) {
        It->second = ValueLatticeElement();
        V = Inst;
      }

      if (V) {
        LLVM_DEBUG(dbgs() << "Invalidated lattice for " << *V << "\n");

        // Propagate the invalidation to direct and additional users.
        for (User *U : V->users())
          if (auto *UI = dyn_cast<Instruction>(U))
            ToInvalidate.push_back(UI);

        auto It = AdditionalUsers.find(V);
        if (It != AdditionalUsers.end())
          for (User *U : It->second)
            if (auto *UI = dyn_cast<Instruction>(U))
              ToInvalidate.push_back(UI);
      }
    }
  }
660 
661   /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
662   /// work list if it is not already executable.
663   bool markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest);
664 
665   // getFeasibleSuccessors - Return a vector of booleans to indicate which
666   // successors are reachable from a given terminator instruction.
667   void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs);
668 
  // Record U as an extra (non-def-use) user of V so it is re-visited and
  // invalidated whenever V's lattice changes.
  void addAdditionalUser(Value *V, User *U) { AdditionalUsers[V].insert(U); }
671 
672   void handleCallOverdefined(CallBase &CB);
673   void handleCallResult(CallBase &CB);
674   void handleCallArguments(CallBase &CB);
675   void handleExtractOfWithOverflow(ExtractValueInst &EVI,
676                                    const WithOverflowInst *WO, unsigned Idx);
677 
678 private:
679   friend class InstVisitor<SCCPInstVisitor>;
680 
681   // visit implementations - Something changed in this instruction.  Either an
682   // operand made a transition, or the instruction is newly executable.  Change
683   // the value type of I to reflect these changes if appropriate.
684   void visitPHINode(PHINode &I);
685 
686   // Terminators
687 
688   void visitReturnInst(ReturnInst &I);
689   void visitTerminator(Instruction &TI);
690 
691   void visitCastInst(CastInst &I);
692   void visitSelectInst(SelectInst &I);
693   void visitUnaryOperator(Instruction &I);
694   void visitFreezeInst(FreezeInst &I);
695   void visitBinaryOperator(Instruction &I);
696   void visitCmpInst(CmpInst &I);
697   void visitExtractValueInst(ExtractValueInst &EVI);
698   void visitInsertValueInst(InsertValueInst &IVI);
699 
  // catchswitch produces a token we cannot reason about: go straight to
  // overdefined, then process its successor edges like any terminator.
  void visitCatchSwitchInst(CatchSwitchInst &CPI) {
    markOverdefined(&CPI);
    visitTerminator(CPI);
  }
704 
705   // Instructions that cannot be folded away.
706 
707   void visitStoreInst(StoreInst &I);
708   void visitLoadInst(LoadInst &I);
709   void visitGetElementPtrInst(GetElementPtrInst &I);
710   void visitAllocaInst(AllocaInst &AI);
711 
  // An invoke is both a call site and a terminator; handle both aspects.
  void visitInvokeInst(InvokeInst &II) {
    visitCallBase(II);
    visitTerminator(II);
  }
716 
  // callbr is likewise a call site and a terminator.
  void visitCallBrInst(CallBrInst &CBI) {
    visitCallBase(CBI);
    visitTerminator(CBI);
  }
721 
722   void visitCallBase(CallBase &CB);
  // resume yields no value to track; nothing to do.
  void visitResumeInst(ResumeInst &I) { /*returns void*/
  }
  // unreachable yields no value to track; nothing to do.
  void visitUnreachableInst(UnreachableInst &I) { /*returns void*/
  }
  // fence yields no value to track; nothing to do.
  void visitFenceInst(FenceInst &I) { /*returns void*/
  }
729 
730   void visitInstruction(Instruction &I);
731 
732 public:
  /// Build and cache PredicateInfo for \p F so predicate-aware lattice
  /// refinement (ssa.copy intrinsics) is available during solving.
  void addPredicateInfo(Function &F, DominatorTree &DT, AssumptionCache &AC) {
    FnPredicateInfo.insert({&F, std::make_unique<PredicateInfo>(
                                    F, DT, AC, PredicateInfoAllocator)});
  }
737 
738   void removeSSACopies(Function &F) {
739     auto It = FnPredicateInfo.find(&F);
740     if (It == FnPredicateInfo.end())
741       return;
742 
743     for (BasicBlock &BB : F) {
744       for (Instruction &Inst : llvm::make_early_inc_range(BB)) {
745         if (auto *II = dyn_cast<IntrinsicInst>(&Inst)) {
746           if (II->getIntrinsicID() == Intrinsic::ssa_copy) {
747             if (It->second->getPredicateInfoFor(&Inst)) {
748               Value *Op = II->getOperand(0);
749               Inst.replaceAllUsesWith(Op);
750               Inst.eraseFromParent();
751             }
752           }
753         }
754       }
755     }
756   }
757 
758   void visitCallInst(CallInst &I) { visitCallBase(I); }
759 
760   bool markBlockExecutable(BasicBlock *BB);
761 
762   const PredicateBase *getPredicateInfoFor(Instruction *I) {
763     auto It = FnPredicateInfo.find(I->getParent()->getParent());
764     if (It == FnPredicateInfo.end())
765       return nullptr;
766     return It->second->getPredicateInfoFor(I);
767   }
768 
  /// \p GetTLI supplies per-function TargetLibraryInfo used when evaluating
  /// calls.
  SCCPInstVisitor(const DataLayout &DL,
                  std::function<const TargetLibraryInfo &(Function &)> GetTLI,
                  LLVMContext &Ctx)
      : DL(DL), GetTLI(GetTLI), Ctx(Ctx) {}
773 
774   void trackValueOfGlobalVariable(GlobalVariable *GV) {
775     // We only track the contents of scalar globals.
776     if (GV->getValueType()->isSingleValueType()) {
777       ValueLatticeElement &IV = TrackedGlobals[GV];
778       IV.markConstant(GV->getInitializer());
779     }
780   }
781 
782   void addTrackedFunction(Function *F) {
783     // Add an entry, F -> undef.
784     if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
785       MRVFunctionsTracked.insert(F);
786       for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
787         TrackedMultipleRetVals.try_emplace(std::make_pair(F, i));
788     } else if (!F->getReturnType()->isVoidTy())
789       TrackedRetVals.try_emplace(F);
790   }
791 
  // Record that \p F's returns may not be replaced/zapped by the solver.
  void addToMustPreserveReturnsInFunctions(Function *F) {
    MustPreserveReturnsInFunctions.insert(F);
  }

  // True if \p F was registered via addToMustPreserveReturnsInFunctions.
  bool mustPreserveReturn(Function *F) {
    return MustPreserveReturnsInFunctions.count(F);
  }

  // Begin tracking lattice values for \p F's incoming arguments.
  void addArgumentTrackedFunction(Function *F) {
    TrackingIncomingArguments.insert(F);
  }

  // True if \p F's incoming arguments are being tracked.
  bool isArgumentTrackedFunction(Function *F) {
    return TrackingIncomingArguments.count(F);
  }

  const SmallPtrSetImpl<Function *> &getArgumentTrackedFunctions() const {
    return TrackingIncomingArguments;
  }
811 
  // Run the worklist algorithm to a fixed point (defined out-of-line).
  void solve();

  // Force a resolution for undef/unknown operands of \p I; returns true if
  // any lattice value changed.
  bool resolvedUndef(Instruction &I);

  // Apply resolvedUndef across \p F; returns true if anything changed.
  bool resolvedUndefsIn(Function &F);

  bool isBlockExecutable(BasicBlock *BB) const {
    return BBExecutable.count(BB);
  }

  // See out-of-line definition: true only for edges explicitly marked
  // feasible by the solver.
  bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const;
823 
824   std::vector<ValueLatticeElement> getStructLatticeValueFor(Value *V) const {
825     std::vector<ValueLatticeElement> StructValues;
826     auto *STy = dyn_cast<StructType>(V->getType());
827     assert(STy && "getStructLatticeValueFor() can be called only on structs");
828     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
829       auto I = StructValueState.find(std::make_pair(V, i));
830       assert(I != StructValueState.end() && "Value not in valuemap!");
831       StructValues.push_back(I->second);
832     }
833     return StructValues;
834   }
835 
  // Drop the (non-struct) lattice entry for \p V entirely.
  void removeLatticeValueFor(Value *V) { ValueState.erase(V); }

  /// Invalidate the Lattice Value of \p Call and its users after specializing
  /// the call. Then recompute it.
  void resetLatticeValueFor(CallBase *Call) {
    // Calls to void returning functions do not need invalidation.
    Function *F = Call->getCalledFunction();
    (void)F; // Only used by the assertion below.
    assert(!F->getReturnType()->isVoidTy() &&
           (TrackedRetVals.count(F) || MRVFunctionsTracked.count(F)) &&
           "All non void specializations should be tracked");
    invalidate(Call);
    handleCallResult(*Call);
  }
850 
851   const ValueLatticeElement &getLatticeValueFor(Value *V) const {
852     assert(!V->getType()->isStructTy() &&
853            "Should use getStructLatticeValueFor");
854     DenseMap<Value *, ValueLatticeElement>::const_iterator I =
855         ValueState.find(V);
856     assert(I != ValueState.end() &&
857            "V not found in ValueState nor Paramstate map!");
858     return I->second;
859   }
860 
  // Tracked scalar return values, one entry per tracked function.
  const MapVector<Function *, ValueLatticeElement> &getTrackedRetVals() const {
    return TrackedRetVals;
  }

  // Tracked scalar global variables and their merged stored values.
  const DenseMap<GlobalVariable *, ValueLatticeElement> &
  getTrackedGlobals() const {
    return TrackedGlobals;
  }

  // Functions whose struct ("multiple return value") returns are tracked.
  const SmallPtrSet<Function *, 16> &getMRVFunctionsTracked() const {
    return MRVFunctionsTracked;
  }
873 
874   void markOverdefined(Value *V) {
875     if (auto *STy = dyn_cast<StructType>(V->getType()))
876       for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
877         markOverdefined(getStructValueState(V, i), V);
878     else
879       markOverdefined(ValueState[V], V);
880   }
881 
  // Derive an initial lattice value for argument \p A from its attributes:
  // a range attribute yields a constant range, nonnull yields "not null",
  // otherwise overdefined.
  ValueLatticeElement getArgAttributeVL(Argument *A) {
    if (A->getType()->isIntOrIntVectorTy()) {
      if (std::optional<ConstantRange> Range = A->getRange())
        return ValueLatticeElement::getRange(*Range);
    }
    if (A->hasNonNullAttr())
      return ValueLatticeElement::getNot(Constant::getNullValue(A->getType()));
    // Assume nothing about the incoming arguments without attributes.
    return ValueLatticeElement::getOverdefined();
  }
892 
  // Seed the lattice for argument \p A from its attributes; struct-typed
  // arguments are not tracked and go straight to overdefined.
  void trackValueOfArgument(Argument *A) {
    if (A->getType()->isStructTy())
      return (void)markOverdefined(A);
    mergeInValue(A, getArgAttributeVL(A));
  }

  // True if every tracked struct-return element of \p F is constant.
  bool isStructLatticeConstant(Function *F, StructType *STy);

  // Materialize \p LV as a Constant of type \p Ty, or nullptr if it is not
  // a single value.
  Constant *getConstant(const ValueLatticeElement &LV, Type *Ty) const;

  // As above for \p V, substituting undef for unknown elements; nullptr if
  // any part is overdefined.
  Constant *getConstantOrNull(Value *V) const;

  void setLatticeValueForSpecializationArguments(Function *F,
                                       const SmallVectorImpl<ArgInfo> &Args);

  // Forget executability of every block of \p F (used when the function is
  // proven unreachable).
  void markFunctionUnreachable(Function *F) {
    for (auto &BB : *F)
      BBExecutable.erase(&BB);
  }
912 
913   void solveWhileResolvedUndefsIn(Module &M) {
914     bool ResolvedUndefs = true;
915     while (ResolvedUndefs) {
916       solve();
917       ResolvedUndefs = false;
918       for (Function &F : M)
919         ResolvedUndefs |= resolvedUndefsIn(F);
920     }
921   }
922 
923   void solveWhileResolvedUndefsIn(SmallVectorImpl<Function *> &WorkList) {
924     bool ResolvedUndefs = true;
925     while (ResolvedUndefs) {
926       solve();
927       ResolvedUndefs = false;
928       for (Function *F : WorkList)
929         ResolvedUndefs |= resolvedUndefsIn(*F);
930     }
931   }
932 
933   void solveWhileResolvedUndefs() {
934     bool ResolvedUndefs = true;
935     while (ResolvedUndefs) {
936       solve();
937       ResolvedUndefs = false;
938       for (Value *V : Invalidated)
939         if (auto *I = dyn_cast<Instruction>(V))
940           ResolvedUndefs |= resolvedUndef(*I);
941     }
942     Invalidated.clear();
943   }
944 };
945 
946 } // namespace llvm
947 
948 bool SCCPInstVisitor::markBlockExecutable(BasicBlock *BB) {
949   if (!BBExecutable.insert(BB).second)
950     return false;
951   LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
952   BBWorkList.push_back(BB); // Add the block to the work list!
953   return true;
954 }
955 
// Queue \p I for (re)visiting, with two filters: instructions later in the
// block currently being scanned will be reached anyway, and instructions in
// blocks not yet visited are handled on the block's first visit.
void SCCPInstVisitor::pushToWorkList(Instruction *I) {
  // If we're currently visiting a block, do not push any instructions in the
  // same blocks that are after the current one, as they will be visited
  // anyway. We do have to push updates to earlier instructions (e.g. phi
  // nodes or loads of tracked globals).
  if (CurI && I->getParent() == CurI->getParent() && !I->comesBefore(CurI))
    return;
  // Only push instructions in already visited blocks. Otherwise we'll handle
  // it when we visit the block for the first time.
  if (BBVisited.contains(I->getParent()))
    InstWorkList.insert(I);
}
968 
969 void SCCPInstVisitor::pushUsersToWorkList(Value *V) {
970   for (User *U : V->users())
971     if (auto *UI = dyn_cast<Instruction>(U))
972       pushToWorkList(UI);
973 
974   auto Iter = AdditionalUsers.find(V);
975   if (Iter != AdditionalUsers.end()) {
976     // Copy additional users before notifying them of changes, because new
977     // users may be added, potentially invalidating the iterator.
978     SmallVector<Instruction *, 2> ToNotify;
979     for (User *U : Iter->second)
980       if (auto *UI = dyn_cast<Instruction>(U))
981         ToNotify.push_back(UI);
982     for (Instruction *UI : ToNotify)
983       pushToWorkList(UI);
984   }
985 }
986 
// Debug-logging wrapper around pushUsersToWorkList: prints the new lattice
// value \p IV for \p V before notifying users.
void SCCPInstVisitor::pushUsersToWorkListMsg(ValueLatticeElement &IV,
                                             Value *V) {
  LLVM_DEBUG(dbgs() << "updated " << IV << ": " << *V << '\n');
  pushUsersToWorkList(V);
}
992 
993 bool SCCPInstVisitor::markConstant(ValueLatticeElement &IV, Value *V,
994                                    Constant *C, bool MayIncludeUndef) {
995   if (!IV.markConstant(C, MayIncludeUndef))
996     return false;
997   LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
998   pushUsersToWorkList(V);
999   return true;
1000 }
1001 
1002 bool SCCPInstVisitor::markNotConstant(ValueLatticeElement &IV, Value *V,
1003                                       Constant *C) {
1004   if (!IV.markNotConstant(C))
1005     return false;
1006   LLVM_DEBUG(dbgs() << "markNotConstant: " << *C << ": " << *V << '\n');
1007   pushUsersToWorkList(V);
1008   return true;
1009 }
1010 
1011 bool SCCPInstVisitor::markConstantRange(ValueLatticeElement &IV, Value *V,
1012                                         const ConstantRange &CR) {
1013   if (!IV.markConstantRange(CR))
1014     return false;
1015   LLVM_DEBUG(dbgs() << "markConstantRange: " << CR << ": " << *V << '\n');
1016   pushUsersToWorkList(V);
1017   return true;
1018 }
1019 
// Drive \p V's lattice value \p IV to overdefined (lattice bottom for SCCP);
// on change, notify users. Returns whether the value changed.
bool SCCPInstVisitor::markOverdefined(ValueLatticeElement &IV, Value *V) {
  if (!IV.markOverdefined())
    return false;

  LLVM_DEBUG(dbgs() << "markOverdefined: ";
             if (auto *F = dyn_cast<Function>(V)) dbgs()
             << "Function '" << F->getName() << "'\n";
             else dbgs() << *V << '\n');
  // Only instructions go on the work list
  pushUsersToWorkList(V);
  return true;
}
1032 
1033 bool SCCPInstVisitor::isStructLatticeConstant(Function *F, StructType *STy) {
1034   for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1035     const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i));
1036     assert(It != TrackedMultipleRetVals.end());
1037     ValueLatticeElement LV = It->second;
1038     if (!SCCPSolver::isConstant(LV))
1039       return false;
1040   }
1041   return true;
1042 }
1043 
1044 Constant *SCCPInstVisitor::getConstant(const ValueLatticeElement &LV,
1045                                        Type *Ty) const {
1046   if (LV.isConstant()) {
1047     Constant *C = LV.getConstant();
1048     assert(C->getType() == Ty && "Type mismatch");
1049     return C;
1050   }
1051 
1052   if (LV.isConstantRange()) {
1053     const auto &CR = LV.getConstantRange();
1054     if (CR.getSingleElement())
1055       return ConstantInt::get(Ty, *CR.getSingleElement());
1056   }
1057   return nullptr;
1058 }
1059 
1060 Constant *SCCPInstVisitor::getConstantOrNull(Value *V) const {
1061   Constant *Const = nullptr;
1062   if (V->getType()->isStructTy()) {
1063     std::vector<ValueLatticeElement> LVs = getStructLatticeValueFor(V);
1064     if (any_of(LVs, SCCPSolver::isOverdefined))
1065       return nullptr;
1066     std::vector<Constant *> ConstVals;
1067     auto *ST = cast<StructType>(V->getType());
1068     for (unsigned I = 0, E = ST->getNumElements(); I != E; ++I) {
1069       ValueLatticeElement LV = LVs[I];
1070       ConstVals.push_back(SCCPSolver::isConstant(LV)
1071                               ? getConstant(LV, ST->getElementType(I))
1072                               : UndefValue::get(ST->getElementType(I)));
1073     }
1074     Const = ConstantStruct::get(ST, ConstVals);
1075   } else {
1076     const ValueLatticeElement &LV = getLatticeValueFor(V);
1077     if (SCCPSolver::isOverdefined(LV))
1078       return nullptr;
1079     Const = SCCPSolver::isConstant(LV) ? getConstant(LV, V->getType())
1080                                        : UndefValue::get(V->getType());
1081   }
1082   assert(Const && "Constant is nullptr here!");
1083   return Const;
1084 }
1085 
// Seed the lattice values of specialized function \p F's arguments:
// arguments listed in \p Args become the given constants; all others copy
// their lattice state from the corresponding argument of the original
// function. NOTE(review): Iter advances only when it matches OldArg, which
// assumes Args is ordered by argument position — confirm against callers.
void SCCPInstVisitor::setLatticeValueForSpecializationArguments(Function *F,
                                        const SmallVectorImpl<ArgInfo> &Args) {
  assert(!Args.empty() && "Specialization without arguments");
  assert(F->arg_size() == Args[0].Formal->getParent()->arg_size() &&
         "Functions should have the same number of arguments");

  auto Iter = Args.begin();
  // NewArg walks the specialization, OldArg the original, in lockstep.
  Function::arg_iterator NewArg = F->arg_begin();
  Function::arg_iterator OldArg = Args[0].Formal->getParent()->arg_begin();
  for (auto End = F->arg_end(); NewArg != End; ++NewArg, ++OldArg) {

    LLVM_DEBUG(dbgs() << "SCCP: Marking argument "
                      << NewArg->getNameOrAsOperand() << "\n");

    // Mark the argument constants in the new function
    // or copy the lattice state over from the old function.
    if (Iter != Args.end() && Iter->Formal == &*OldArg) {
      if (auto *STy = dyn_cast<StructType>(NewArg->getType())) {
        // Struct arguments are tracked element-wise.
        for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
          ValueLatticeElement &NewValue = StructValueState[{&*NewArg, I}];
          NewValue.markConstant(Iter->Actual->getAggregateElement(I));
        }
      } else {
        ValueState[&*NewArg].markConstant(Iter->Actual);
      }
      ++Iter;
    } else {
      if (auto *STy = dyn_cast<StructType>(NewArg->getType())) {
        for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
          ValueLatticeElement &NewValue = StructValueState[{&*NewArg, I}];
          NewValue = StructValueState[{&*OldArg, I}];
        }
      } else {
        ValueLatticeElement &NewValue = ValueState[&*NewArg];
        NewValue = ValueState[&*OldArg];
      }
    }
  }
}
1125 
// Fallback visitor: any instruction without dedicated handling cannot be
// reasoned about, so its result is conservatively overdefined.
void SCCPInstVisitor::visitInstruction(Instruction &I) {
  // All the instructions we don't do any special handling for just
  // go to overdefined.
  LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n');
  markOverdefined(&I);
}
1132 
1133 bool SCCPInstVisitor::mergeInValue(ValueLatticeElement &IV, Value *V,
1134                                    ValueLatticeElement MergeWithV,
1135                                    ValueLatticeElement::MergeOptions Opts) {
1136   if (IV.mergeIn(MergeWithV, Opts)) {
1137     pushUsersToWorkList(V);
1138     LLVM_DEBUG(dbgs() << "Merged " << MergeWithV << " into " << *V << " : "
1139                       << IV << "\n");
1140     return true;
1141   }
1142   return false;
1143 }
1144 
// Mark the CFG edge Source->Dest feasible. Returns false if the edge was
// already known. When Dest was already executable, its phi nodes are
// re-queued because the newly-feasible edge gives them a new live operand.
bool SCCPInstVisitor::markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
  if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
    return false; // This edge is already known to be executable!

  if (!markBlockExecutable(Dest)) {
    // If the destination is already executable, we just made an *edge*
    // feasible that wasn't before.  Revisit the PHI nodes in the block
    // because they have potentially new operands.
    LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
                      << " -> " << Dest->getName() << '\n');

    for (PHINode &PN : Dest->phis())
      pushToWorkList(&PN);
  }
  return true;
}
1161 
// getFeasibleSuccessors - Return a vector of booleans to indicate which
// successors are reachable from a given terminator instruction.
void SCCPInstVisitor::getFeasibleSuccessors(Instruction &TI,
                                            SmallVectorImpl<bool> &Succs) {
  Succs.resize(TI.getNumSuccessors());
  if (auto *BI = dyn_cast<BranchInst>(&TI)) {
    if (BI->isUnconditional()) {
      Succs[0] = true;
      return;
    }

    ValueLatticeElement BCValue = getValueState(BI->getCondition());
    ConstantInt *CI = getConstantInt(BCValue, BI->getCondition()->getType());
    if (!CI) {
      // Overdefined condition variables, and branches on unfoldable constant
      // conditions, mean the branch could go either way.
      // (Unknown/undef conditions leave all successors infeasible for now.)
      if (!BCValue.isUnknownOrUndef())
        Succs[0] = Succs[1] = true;
      return;
    }

    // Constant condition variables mean the branch can only go a single way.
    // Successor 0 is the true edge, successor 1 the false edge, so a zero
    // condition selects index 1.
    Succs[CI->isZero()] = true;
    return;
  }

  // We cannot analyze special terminators, so consider all successors
  // executable.
  if (TI.isSpecialTerminator()) {
    Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  if (auto *SI = dyn_cast<SwitchInst>(&TI)) {
    if (!SI->getNumCases()) {
      // Only the default destination exists.
      Succs[0] = true;
      return;
    }
    const ValueLatticeElement &SCValue = getValueState(SI->getCondition());
    if (ConstantInt *CI =
            getConstantInt(SCValue, SI->getCondition()->getType())) {
      Succs[SI->findCaseValue(CI)->getSuccessorIndex()] = true;
      return;
    }

    // TODO: Switch on undef is UB. Stop passing false once the rest of LLVM
    // is ready.
    if (SCValue.isConstantRange(/*UndefAllowed=*/false)) {
      const ConstantRange &Range = SCValue.getConstantRange();
      unsigned ReachableCaseCount = 0;
      for (const auto &Case : SI->cases()) {
        const APInt &CaseValue = Case.getCaseValue()->getValue();
        if (Range.contains(CaseValue)) {
          Succs[Case.getSuccessorIndex()] = true;
          ++ReachableCaseCount;
        }
      }

      // The default edge is feasible only if the range contains more values
      // than the cases it can reach.
      Succs[SI->case_default()->getSuccessorIndex()] =
          Range.isSizeLargerThan(ReachableCaseCount);
      return;
    }

    // Overdefined or unknown condition? All destinations are executable!
    if (!SCValue.isUnknownOrUndef())
      Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  // In case of indirect branch and its address is a blockaddress, we mark
  // the target as executable.
  if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) {
    // Casts are folded by visitCastInst.
    ValueLatticeElement IBRValue = getValueState(IBR->getAddress());
    BlockAddress *Addr = dyn_cast_or_null<BlockAddress>(
        getConstant(IBRValue, IBR->getAddress()->getType()));
    if (!Addr) { // Overdefined or unknown condition?
      // All destinations are executable!
      if (!IBRValue.isUnknownOrUndef())
        Succs.assign(TI.getNumSuccessors(), true);
      return;
    }

    BasicBlock *T = Addr->getBasicBlock();
    assert(Addr->getFunction() == T->getParent() &&
           "Block address of a different function ?");
    for (unsigned i = 0; i < IBR->getNumSuccessors(); ++i) {
      // This is the target.
      if (IBR->getDestination(i) == T) {
        Succs[i] = true;
        return;
      }
    }

    // If we didn't find our destination in the IBR successor list, then we
    // have undefined behavior. Its ok to assume no successor is executable.
    return;
  }

  LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
  llvm_unreachable("SCCP: Don't know how to handle this terminator!");
}
1264 
// isEdgeFeasible - Return true if the control flow edge from the 'From' basic
// block to the 'To' basic block is currently feasible.
bool SCCPInstVisitor::isEdgeFeasible(BasicBlock *From, BasicBlock *To) const {
  // Check if we've called markEdgeExecutable on the edge yet. (We could
  // be more aggressive and try to consider edges which haven't been marked
  // yet, but there isn't any need.)
  return KnownFeasibleEdges.count(Edge(From, To));
}
1273 
1274 // visit Implementations - Something changed in this instruction, either an
1275 // operand made a transition, or the instruction is newly executable.  Change
1276 // the value type of I to reflect these changes if appropriate.  This method
1277 // makes sure to do the following actions:
1278 //
1279 // 1. If a phi node merges two constants in, and has conflicting value coming
1280 //    from different branches, or if the PHI node merges in an overdefined
1281 //    value, then the PHI node becomes overdefined.
1282 // 2. If a phi node merges only constants in, and they all agree on value, the
1283 //    PHI node becomes a constant value equal to that.
1284 // 3. If V <- x (op) y && isConstant(x) && isConstant(y) V = Constant
1285 // 4. If V <- x (op) y && (isOverdefined(x) || isOverdefined(y)) V = Overdefined
1286 // 5. If V <- MEM or V <- CALL or V <- (unknown) then V = Overdefined
1287 // 6. If a conditional branch has a value that is constant, make the selected
1288 //    destination executable
1289 // 7. If a conditional branch has a value that is overdefined, make all
1290 //    successors executable.
void SCCPInstVisitor::visitPHINode(PHINode &PN) {
  // If this PN returns a struct, just mark the result overdefined.
  // TODO: We could do a lot better than this if code actually uses this.
  if (PN.getType()->isStructTy())
    return (void)markOverdefined(&PN);

  if (getValueState(&PN).isOverdefined())
    return; // Quick exit

  // Super-extra-high-degree PHI nodes are unlikely to ever be marked constant,
  // and slow us down a lot.  Just mark them overdefined.
  if (PN.getNumIncomingValues() > 64)
    return (void)markOverdefined(&PN);

  unsigned NumActiveIncoming = 0;

  // Look at all of the executable operands of the PHI node.  If any of them
  // are overdefined, the PHI becomes overdefined as well.  If they are all
  // constant, and they agree with each other, the PHI becomes the identical
  // constant.  If they are constant and don't agree, the PHI is a constant
  // range. If there are no executable operands, the PHI remains unknown.
  ValueLatticeElement PhiState = getValueState(&PN);
  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
    // Infeasible edges contribute nothing; if one later becomes feasible,
    // markEdgeExecutable re-queues this phi.
    if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
      continue;

    ValueLatticeElement IV = getValueState(PN.getIncomingValue(i));
    PhiState.mergeIn(IV);
    NumActiveIncoming++;
    if (PhiState.isOverdefined())
      break;
  }

  // We allow up to 1 range extension per active incoming value and one
  // additional extension. Note that we manually adjust the number of range
  // extensions to match the number of active incoming values. This helps to
  // limit multiple extensions caused by the same incoming value, if other
  // incoming values are equal.
  mergeInValue(&PN, PhiState,
               ValueLatticeElement::MergeOptions().setMaxWidenSteps(
                   NumActiveIncoming + 1));
  ValueLatticeElement &PhiStateRef = getValueState(&PN);
  PhiStateRef.setNumRangeExtensions(
      std::max(NumActiveIncoming, PhiStateRef.getNumRangeExtensions()));
}
1336 
1337 void SCCPInstVisitor::visitReturnInst(ReturnInst &I) {
1338   if (I.getNumOperands() == 0)
1339     return; // ret void
1340 
1341   Function *F = I.getParent()->getParent();
1342   Value *ResultOp = I.getOperand(0);
1343 
1344   // If we are tracking the return value of this function, merge it in.
1345   if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) {
1346     auto TFRVI = TrackedRetVals.find(F);
1347     if (TFRVI != TrackedRetVals.end()) {
1348       mergeInValue(TFRVI->second, F, getValueState(ResultOp));
1349       return;
1350     }
1351   }
1352 
1353   // Handle functions that return multiple values.
1354   if (!TrackedMultipleRetVals.empty()) {
1355     if (auto *STy = dyn_cast<StructType>(ResultOp->getType()))
1356       if (MRVFunctionsTracked.count(F))
1357         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1358           mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
1359                        getStructValueState(ResultOp, i));
1360   }
1361 }
1362 
1363 void SCCPInstVisitor::visitTerminator(Instruction &TI) {
1364   SmallVector<bool, 16> SuccFeasible;
1365   getFeasibleSuccessors(TI, SuccFeasible);
1366 
1367   BasicBlock *BB = TI.getParent();
1368 
1369   // Mark all feasible successors executable.
1370   for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
1371     if (SuccFeasible[i])
1372       markEdgeExecutable(BB, TI.getSuccessor(i));
1373 }
1374 
// Cast handling: fold constant operands directly; otherwise propagate a
// constant range through integer casts. Bitcasts and non-integer casts fall
// through to overdefined.
void SCCPInstVisitor::visitCastInst(CastInst &I) {
  // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (ValueState[&I].isOverdefined())
    return;

  ValueLatticeElement OpSt = getValueState(I.getOperand(0));
  if (OpSt.isUnknownOrUndef())
    return; // Wait for the operand to resolve.

  if (Constant *OpC = getConstant(OpSt, I.getOperand(0)->getType())) {
    // Fold the constant as we build.
    if (Constant *C =
            ConstantFoldCastOperand(I.getOpcode(), OpC, I.getType(), DL))
      return (void)markConstant(&I, C);
  }

  // Ignore bitcasts, as they may change the number of vector elements.
  if (I.getDestTy()->isIntOrIntVectorTy() &&
      I.getSrcTy()->isIntOrIntVectorTy() &&
      I.getOpcode() != Instruction::BitCast) {
    auto &LV = getValueState(&I);
    ConstantRange OpRange =
        OpSt.asConstantRange(I.getSrcTy(), /*UndefAllowed=*/false);

    // Propagate the operand range through the cast opcode.
    Type *DestTy = I.getDestTy();
    ConstantRange Res =
        OpRange.castOp(I.getOpcode(), DestTy->getScalarSizeInBits());
    mergeInValue(LV, &I, ValueLatticeElement::getRange(Res));
  } else
    markOverdefined(&I);
}
1407 
// Model extractvalue of a {result, overflow-bit} pair produced by a
// llvm.*.with.overflow intrinsic: index 0 gets the range of the underlying
// binary op; index 1 is false when the operand ranges cannot wrap.
void SCCPInstVisitor::handleExtractOfWithOverflow(ExtractValueInst &EVI,
                                                  const WithOverflowInst *WO,
                                                  unsigned Idx) {
  Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
  ValueLatticeElement L = getValueState(LHS);
  ValueLatticeElement R = getValueState(RHS);
  // EVI is not a direct user of LHS/RHS, so register it explicitly to be
  // re-queued when either operand changes.
  addAdditionalUser(LHS, &EVI);
  addAdditionalUser(RHS, &EVI);
  if (L.isUnknownOrUndef() || R.isUnknownOrUndef())
    return; // Wait to resolve.

  Type *Ty = LHS->getType();
  ConstantRange LR = L.asConstantRange(Ty, /*UndefAllowed=*/false);
  ConstantRange RR = R.asConstantRange(Ty, /*UndefAllowed=*/false);
  if (Idx == 0) {
    ConstantRange Res = LR.binaryOp(WO->getBinaryOp(), RR);
    mergeInValue(&EVI, ValueLatticeElement::getRange(Res));
  } else {
    assert(Idx == 1 && "Index can only be 0 or 1");
    // If LHS lies entirely inside the no-wrap region for RHS, the overflow
    // bit is provably false.
    ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        WO->getBinaryOp(), RR, WO->getNoWrapKind());
    if (NWRegion.contains(LR))
      return (void)markConstant(&EVI, ConstantInt::getFalse(EVI.getType()));
    markOverdefined(&EVI);
  }
}
1434 
1435 void SCCPInstVisitor::visitExtractValueInst(ExtractValueInst &EVI) {
1436   // If this returns a struct, mark all elements over defined, we don't track
1437   // structs in structs.
1438   if (EVI.getType()->isStructTy())
1439     return (void)markOverdefined(&EVI);
1440 
1441   // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1442   // discover a concrete value later.
1443   if (ValueState[&EVI].isOverdefined())
1444     return (void)markOverdefined(&EVI);
1445 
1446   // If this is extracting from more than one level of struct, we don't know.
1447   if (EVI.getNumIndices() != 1)
1448     return (void)markOverdefined(&EVI);
1449 
1450   Value *AggVal = EVI.getAggregateOperand();
1451   if (AggVal->getType()->isStructTy()) {
1452     unsigned i = *EVI.idx_begin();
1453     if (auto *WO = dyn_cast<WithOverflowInst>(AggVal))
1454       return handleExtractOfWithOverflow(EVI, WO, i);
1455     ValueLatticeElement EltVal = getStructValueState(AggVal, i);
1456     mergeInValue(getValueState(&EVI), &EVI, EltVal);
1457   } else {
1458     // Otherwise, must be extracting from an array.
1459     return (void)markOverdefined(&EVI);
1460   }
1461 }
1462 
1463 void SCCPInstVisitor::visitInsertValueInst(InsertValueInst &IVI) {
1464   auto *STy = dyn_cast<StructType>(IVI.getType());
1465   if (!STy)
1466     return (void)markOverdefined(&IVI);
1467 
1468   // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1469   // discover a concrete value later.
1470   if (ValueState[&IVI].isOverdefined())
1471     return (void)markOverdefined(&IVI);
1472 
1473   // If this has more than one index, we can't handle it, drive all results to
1474   // undef.
1475   if (IVI.getNumIndices() != 1)
1476     return (void)markOverdefined(&IVI);
1477 
1478   Value *Aggr = IVI.getAggregateOperand();
1479   unsigned Idx = *IVI.idx_begin();
1480 
1481   // Compute the result based on what we're inserting.
1482   for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1483     // This passes through all values that aren't the inserted element.
1484     if (i != Idx) {
1485       ValueLatticeElement EltVal = getStructValueState(Aggr, i);
1486       mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
1487       continue;
1488     }
1489 
1490     Value *Val = IVI.getInsertedValueOperand();
1491     if (Val->getType()->isStructTy())
1492       // We don't track structs in structs.
1493       markOverdefined(getStructValueState(&IVI, i), &IVI);
1494     else {
1495       ValueLatticeElement InVal = getValueState(Val);
1496       mergeInValue(getStructValueState(&IVI, i), &IVI, InVal);
1497     }
1498   }
1499 }
1500 
1501 void SCCPInstVisitor::visitSelectInst(SelectInst &I) {
1502   // If this select returns a struct, just mark the result overdefined.
1503   // TODO: We could do a lot better than this if code actually uses this.
1504   if (I.getType()->isStructTy())
1505     return (void)markOverdefined(&I);
1506 
1507   // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1508   // discover a concrete value later.
1509   if (ValueState[&I].isOverdefined())
1510     return (void)markOverdefined(&I);
1511 
1512   ValueLatticeElement CondValue = getValueState(I.getCondition());
1513   if (CondValue.isUnknownOrUndef())
1514     return;
1515 
1516   if (ConstantInt *CondCB =
1517           getConstantInt(CondValue, I.getCondition()->getType())) {
1518     Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
1519     mergeInValue(&I, getValueState(OpVal));
1520     return;
1521   }
1522 
1523   // Otherwise, the condition is overdefined or a constant we can't evaluate.
1524   // See if we can produce something better than overdefined based on the T/F
1525   // value.
1526   ValueLatticeElement TVal = getValueState(I.getTrueValue());
1527   ValueLatticeElement FVal = getValueState(I.getFalseValue());
1528 
1529   ValueLatticeElement &State = ValueState[&I];
1530   bool Changed = State.mergeIn(TVal);
1531   Changed |= State.mergeIn(FVal);
1532   if (Changed)
1533     pushUsersToWorkListMsg(State, &I);
1534 }
1535 
1536 // Handle Unary Operators.
1537 void SCCPInstVisitor::visitUnaryOperator(Instruction &I) {
1538   ValueLatticeElement V0State = getValueState(I.getOperand(0));
1539 
1540   ValueLatticeElement &IV = ValueState[&I];
1541   // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1542   // discover a concrete value later.
1543   if (IV.isOverdefined())
1544     return (void)markOverdefined(&I);
1545 
1546   // If something is unknown/undef, wait for it to resolve.
1547   if (V0State.isUnknownOrUndef())
1548     return;
1549 
1550   if (SCCPSolver::isConstant(V0State))
1551     if (Constant *C = ConstantFoldUnaryOpOperand(
1552             I.getOpcode(), getConstant(V0State, I.getType()), DL))
1553       return (void)markConstant(IV, &I, C);
1554 
1555   markOverdefined(&I);
1556 }
1557 
1558 void SCCPInstVisitor::visitFreezeInst(FreezeInst &I) {
1559   // If this freeze returns a struct, just mark the result overdefined.
1560   // TODO: We could do a lot better than this.
1561   if (I.getType()->isStructTy())
1562     return (void)markOverdefined(&I);
1563 
1564   ValueLatticeElement V0State = getValueState(I.getOperand(0));
1565   ValueLatticeElement &IV = ValueState[&I];
1566   // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1567   // discover a concrete value later.
1568   if (IV.isOverdefined())
1569     return (void)markOverdefined(&I);
1570 
1571   // If something is unknown/undef, wait for it to resolve.
1572   if (V0State.isUnknownOrUndef())
1573     return;
1574 
1575   if (SCCPSolver::isConstant(V0State) &&
1576       isGuaranteedNotToBeUndefOrPoison(getConstant(V0State, I.getType())))
1577     return (void)markConstant(IV, &I, getConstant(V0State, I.getType()));
1578 
1579   markOverdefined(&I);
1580 }
1581 
// Handle Binary Operators.
void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
  ValueLatticeElement V1State = getValueState(I.getOperand(0));
  ValueLatticeElement V2State = getValueState(I.getOperand(1));

  ValueLatticeElement &IV = ValueState[&I];
  // Already at the lattice bottom; no further refinement is possible.
  if (IV.isOverdefined())
    return;

  // If something is undef, wait for it to resolve.
  if (V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef())
    return;

  // With both operands overdefined there is nothing left to exploit
  // (but see the TODO at the end of the function).
  if (V1State.isOverdefined() && V2State.isOverdefined())
    return (void)markOverdefined(&I);

  // If either of the operands is a constant, try to fold it to a constant.
  // TODO: Use information from notconstant better.
  if ((V1State.isConstant() || V2State.isConstant())) {
    // Substitute the known constant for an operand where possible; keep the
    // original IR value for the other side so simplifyBinOp can still use it.
    Value *V1 = SCCPSolver::isConstant(V1State)
                    ? getConstant(V1State, I.getOperand(0)->getType())
                    : I.getOperand(0);
    Value *V2 = SCCPSolver::isConstant(V2State)
                    ? getConstant(V2State, I.getOperand(1)->getType())
                    : I.getOperand(1);
    Value *R = simplifyBinOp(I.getOpcode(), V1, V2, SimplifyQuery(DL, &I));
    auto *C = dyn_cast_or_null<Constant>(R);
    if (C) {
      // Conservatively assume that the result may be based on operands that may
      // be undef. Note that we use mergeInValue to combine the constant with
      // the existing lattice value for I, as different constants might be found
      // after one of the operands go to overdefined, e.g. due to one operand
      // being a special floating value.
      ValueLatticeElement NewV;
      NewV.markConstant(C, /*MayIncludeUndef=*/true);
      return (void)mergeInValue(&I, NewV);
    }
  }

  // Only use ranges for binary operators on integers.
  if (!I.getType()->isIntOrIntVectorTy())
    return markOverdefined(&I);

  // Try to simplify to a constant range.
  ConstantRange A =
      V1State.asConstantRange(I.getType(), /*UndefAllowed=*/false);
  ConstantRange B =
      V2State.asConstantRange(I.getType(), /*UndefAllowed=*/false);

  auto *BO = cast<BinaryOperator>(&I);
  ConstantRange R = ConstantRange::getEmpty(I.getType()->getScalarSizeInBits());
  // Exploit nsw/nuw flags to tighten the resulting range when present.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO))
    R = A.overflowingBinaryOp(BO->getOpcode(), B, OBO->getNoWrapKind());
  else
    R = A.binaryOp(BO->getOpcode(), B);
  mergeInValue(&I, ValueLatticeElement::getRange(R));

  // TODO: Currently we do not exploit special values that produce something
  // better than overdefined with an overdefined operand for vector or floating
  // point types, like and <4 x i32> overdefined, zeroinitializer.
}
1643 
// Handle ICmpInst instruction.
void SCCPInstVisitor::visitCmpInst(CmpInst &I) {
  // Do not cache this lookup, getValueState calls later in the function might
  // invalidate the reference.
  if (ValueState[&I].isOverdefined())
    return (void)markOverdefined(&I);

  Value *Op1 = I.getOperand(0);
  Value *Op2 = I.getOperand(1);

  // For parameters, use ParamState which includes constant range info if
  // available.
  auto V1State = getValueState(Op1);
  auto V2State = getValueState(Op2);

  // Try to fold the predicate using the constant/range information carried by
  // both operand lattice states.
  Constant *C = V1State.getCompare(I.getPredicate(), I.getType(), V2State, DL);
  if (C) {
    ValueLatticeElement CV;
    CV.markConstant(C);
    mergeInValue(&I, CV);
    return;
  }

  // If operands are still unknown, wait for it to resolve.
  if ((V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef()) &&
      !SCCPSolver::isConstant(ValueState[&I]))
    return;

  // Could not fold the comparison; the result is overdefined.
  markOverdefined(&I);
}
1674 
// Handle getelementptr instructions.  If all operands are constants then we
// can turn this into a getelementptr ConstantExpr.
void SCCPInstVisitor::visitGetElementPtrInst(GetElementPtrInst &I) {
  if (ValueState[&I].isOverdefined())
    return (void)markOverdefined(&I);

  const ValueLatticeElement &PtrState = getValueState(I.getPointerOperand());
  // Base pointer not resolved yet; revisit once it is.
  if (PtrState.isUnknownOrUndef())
    return;

  // gep inbounds/nuw of non-null is non-null.
  if (PtrState.isNotConstant() && PtrState.getNotConstant()->isNullValue()) {
    if (I.hasNoUnsignedWrap() ||
        (I.isInBounds() &&
         !NullPointerIsDefined(I.getFunction(), I.getAddressSpace())))
      return (void)markNotNull(ValueState[&I], &I);
    return (void)markOverdefined(&I);
  }

  // Collect constant values for all operands; any resolved-but-non-constant
  // operand forces the result to overdefined.
  SmallVector<Constant *, 8> Operands;
  Operands.reserve(I.getNumOperands());

  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    ValueLatticeElement State = getValueState(I.getOperand(i));
    if (State.isUnknownOrUndef())
      return; // Operands are not resolved yet.

    if (Constant *C = getConstant(State, I.getOperand(i)->getType())) {
      Operands.push_back(C);
      continue;
    }

    return (void)markOverdefined(&I);
  }

  // All operands are constant; try to fold the entire GEP to a constant.
  if (Constant *C = ConstantFoldInstOperands(&I, Operands, DL))
    markConstant(&I, C);
  else
    markOverdefined(&I);
}
1715 
1716 void SCCPInstVisitor::visitAllocaInst(AllocaInst &I) {
1717   if (!NullPointerIsDefined(I.getFunction(), I.getAddressSpace()))
1718     return (void)markNotNull(ValueState[&I], &I);
1719 
1720   markOverdefined(&I);
1721 }
1722 
1723 void SCCPInstVisitor::visitStoreInst(StoreInst &SI) {
1724   // If this store is of a struct, ignore it.
1725   if (SI.getOperand(0)->getType()->isStructTy())
1726     return;
1727 
1728   if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
1729     return;
1730 
1731   GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
1732   auto I = TrackedGlobals.find(GV);
1733   if (I == TrackedGlobals.end())
1734     return;
1735 
1736   // Get the value we are storing into the global, then merge it.
1737   mergeInValue(I->second, GV, getValueState(SI.getOperand(0)),
1738                ValueLatticeElement::MergeOptions().setCheckWiden(false));
1739   if (I->second.isOverdefined())
1740     TrackedGlobals.erase(I); // No need to keep tracking this!
1741 }
1742 
1743 static ValueLatticeElement getValueFromMetadata(const Instruction *I) {
1744   if (const auto *CB = dyn_cast<CallBase>(I)) {
1745     if (CB->getType()->isIntOrIntVectorTy())
1746       if (std::optional<ConstantRange> Range = CB->getRange())
1747         return ValueLatticeElement::getRange(*Range);
1748     if (CB->getType()->isPointerTy() && CB->isReturnNonNull())
1749       return ValueLatticeElement::getNot(
1750           ConstantPointerNull::get(cast<PointerType>(I->getType())));
1751   }
1752 
1753   if (I->getType()->isIntOrIntVectorTy())
1754     if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range))
1755       return ValueLatticeElement::getRange(
1756           getConstantRangeFromMetadata(*Ranges));
1757   if (I->hasMetadata(LLVMContext::MD_nonnull))
1758     return ValueLatticeElement::getNot(
1759         ConstantPointerNull::get(cast<PointerType>(I->getType())));
1760 
1761   return ValueLatticeElement::getOverdefined();
1762 }
1763 
// Handle load instructions.  If the operand is a constant pointer to a constant
// global, we can replace the load with the loaded constant value!
void SCCPInstVisitor::visitLoadInst(LoadInst &I) {
  // If this load is of a struct or the load is volatile, just mark the result
  // as overdefined.
  if (I.getType()->isStructTy() || I.isVolatile())
    return (void)markOverdefined(&I);

  // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (ValueState[&I].isOverdefined())
    return (void)markOverdefined(&I);

  ValueLatticeElement PtrVal = getValueState(I.getOperand(0));
  if (PtrVal.isUnknownOrUndef())
    return; // The pointer is not resolved yet!

  // Take the reference only after the getValueState call above, which may
  // have invalidated references into ValueState.
  ValueLatticeElement &IV = ValueState[&I];

  if (SCCPSolver::isConstant(PtrVal)) {
    Constant *Ptr = getConstant(PtrVal, I.getOperand(0)->getType());

    // load null is undefined.
    if (isa<ConstantPointerNull>(Ptr)) {
      if (NullPointerIsDefined(I.getFunction(), I.getPointerAddressSpace()))
        return (void)markOverdefined(IV, &I);
      else
        // Null deref is UB here, so the load may produce any value; leave the
        // state unknown rather than pessimizing to overdefined.
        return;
    }

    // Transform load (constant global) into the value loaded.
    if (auto *GV = dyn_cast<GlobalVariable>(Ptr)) {
      if (!TrackedGlobals.empty()) {
        // If we are tracking this global, merge in the known value for it.
        auto It = TrackedGlobals.find(GV);
        if (It != TrackedGlobals.end()) {
          mergeInValue(IV, &I, It->second, getMaxWidenStepsOpts());
          return;
        }
      }
    }

    // Transform load from a constant into a constant if possible.
    if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, I.getType(), DL))
      return (void)markConstant(IV, &I, C);
  }

  // Fall back to metadata.
  mergeInValue(&I, getValueFromMetadata(&I));
}
1814 
void SCCPInstVisitor::visitCallBase(CallBase &CB) {
  // First compute the lattice state of the call's result, then propagate the
  // actual argument states into the callee's formals (for tracked callees).
  handleCallResult(CB);
  handleCallArguments(CB);
}
1819 
// Model a call whose callee we cannot (or choose not to) analyze: attempt
// constant folding of the call, otherwise fall back to return-value
// attributes/metadata.
void SCCPInstVisitor::handleCallOverdefined(CallBase &CB) {
  Function *F = CB.getCalledFunction();

  // Void return and not tracking callee, just bail.
  if (CB.getType()->isVoidTy())
    return;

  // Always mark struct return as overdefined.
  if (CB.getType()->isStructTy())
    return (void)markOverdefined(&CB);

  // Otherwise, if we have a single return value case, and if the function is
  // a declaration, maybe we can constant fold it.
  if (F && F->isDeclaration() && canConstantFoldCallTo(&CB, F)) {
    SmallVector<Constant *, 8> Operands;
    for (const Use &A : CB.args()) {
      if (A.get()->getType()->isStructTy())
        return markOverdefined(&CB); // Can't handle struct args.
      if (A.get()->getType()->isMetadataTy())
        continue;                    // Carried in CB, not allowed in Operands.
      ValueLatticeElement State = getValueState(A);

      if (State.isUnknownOrUndef())
        return; // Operands are not resolved yet.
      if (SCCPSolver::isOverdefined(State))
        return (void)markOverdefined(&CB);
      assert(SCCPSolver::isConstant(State) && "Unknown state!");
      Operands.push_back(getConstant(State, A->getType()));
    }

    // If the call result is already overdefined, folding cannot improve it.
    if (SCCPSolver::isOverdefined(getValueState(&CB)))
      return (void)markOverdefined(&CB);

    // If we can constant fold this, mark the result of the call as a
    // constant.
    if (Constant *C = ConstantFoldCall(&CB, F, Operands, &GetTLI(*F)))
      return (void)markConstant(&CB, C);
  }

  // Fall back to metadata.
  mergeInValue(&CB, getValueFromMetadata(&CB));
}
1862 
// Propagate the lattice states of the actual arguments at \p CB into the
// formal arguments of a tracked callee.
void SCCPInstVisitor::handleCallArguments(CallBase &CB) {
  Function *F = CB.getCalledFunction();
  // If this is a local function that doesn't have its address taken, mark its
  // entry block executable and merge in the actual arguments to the call into
  // the formal arguments of the function.
  if (TrackingIncomingArguments.count(F)) {
    markBlockExecutable(&F->front());

    // Propagate information from this call site into the callee.
    // CAI walks the actual arguments in lock-step with the formals.
    auto CAI = CB.arg_begin();
    for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
         ++AI, ++CAI) {
      // If this argument is byval, and if the function is not readonly, there
      // will be an implicit copy formed of the input aggregate.
      if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
        markOverdefined(&*AI);
        continue;
      }

      // Struct arguments are merged element-wise.
      if (auto *STy = dyn_cast<StructType>(AI->getType())) {
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          ValueLatticeElement CallArg = getStructValueState(*CAI, i);
          mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg,
                       getMaxWidenStepsOpts());
        }
      } else
        // Refine the incoming state with whatever the formal's attributes
        // guarantee before merging it into the argument's lattice value.
        mergeInValue(&*AI,
                     getValueState(*CAI).intersect(getArgAttributeVL(&*AI)),
                     getMaxWidenStepsOpts());
    }
  }
}
1895 
// Compute the lattice state of the value returned by \p CB. Intrinsics with
// known semantics (ssa.copy, vscale, range-supported intrinsics) are modeled
// directly; calls to tracked functions pull in the callee's tracked return
// state; everything else goes through handleCallOverdefined.
void SCCPInstVisitor::handleCallResult(CallBase &CB) {
  Function *F = CB.getCalledFunction();

  if (auto *II = dyn_cast<IntrinsicInst>(&CB)) {
    if (II->getIntrinsicID() == Intrinsic::ssa_copy) {
      if (ValueState[&CB].isOverdefined())
        return;

      // ssa.copy refines its operand's state using the predicate (branch /
      // assume condition) recorded by PredicateInfo for this copy.
      Value *CopyOf = CB.getOperand(0);
      ValueLatticeElement CopyOfVal = getValueState(CopyOf);
      const auto *PI = getPredicateInfoFor(&CB);
      assert(PI && "Missing predicate info for ssa.copy");

      const std::optional<PredicateConstraint> &Constraint =
          PI->getConstraint();
      if (!Constraint) {
        // No usable constraint; the copy just mirrors its operand.
        mergeInValue(ValueState[&CB], &CB, CopyOfVal);
        return;
      }

      CmpInst::Predicate Pred = Constraint->Predicate;
      Value *OtherOp = Constraint->OtherOp;

      // Wait until OtherOp is resolved.
      if (getValueState(OtherOp).isUnknown()) {
        addAdditionalUser(OtherOp, &CB);
        return;
      }

      ValueLatticeElement CondVal = getValueState(OtherOp);
      ValueLatticeElement &IV = ValueState[&CB];
      if (CondVal.isConstantRange() || CopyOfVal.isConstantRange()) {
        auto ImposedCR =
            ConstantRange::getFull(DL.getTypeSizeInBits(CopyOf->getType()));

        // Get the range imposed by the condition.
        if (CondVal.isConstantRange())
          ImposedCR = ConstantRange::makeAllowedICmpRegion(
              Pred, CondVal.getConstantRange());

        // Combine range info for the original value with the new range from the
        // condition.
        auto CopyOfCR = CopyOfVal.asConstantRange(CopyOf->getType(),
                                                  /*UndefAllowed=*/true);
        // Treat an unresolved input like a full range.
        if (CopyOfCR.isEmptySet())
          CopyOfCR = ConstantRange::getFull(CopyOfCR.getBitWidth());
        auto NewCR = ImposedCR.intersectWith(CopyOfCR);
        // If the existing information is != x, do not use the information from
        // a chained predicate, as the != x information is more likely to be
        // helpful in practice.
        if (!CopyOfCR.contains(NewCR) && CopyOfCR.getSingleMissingElement())
          NewCR = CopyOfCR;

        // The new range is based on a branch condition. That guarantees that
        // neither of the compare operands can be undef in the branch targets,
        // unless we have conditions that are always true/false (e.g. icmp ule
        // i32, %a, i32_max). For the latter overdefined/empty range will be
        // inferred, but the branch will get folded accordingly anyways.
        addAdditionalUser(OtherOp, &CB);
        mergeInValue(
            IV, &CB,
            ValueLatticeElement::getRange(NewCR, /*MayIncludeUndef*/ false));
        return;
      } else if (Pred == CmpInst::ICMP_EQ &&
                 (CondVal.isConstant() || CondVal.isNotConstant())) {
        // For non-integer values or integer constant expressions, only
        // propagate equal constants or not-constants.
        addAdditionalUser(OtherOp, &CB);
        mergeInValue(IV, &CB, CondVal);
        return;
      } else if (Pred == CmpInst::ICMP_NE && CondVal.isConstant()) {
        // Propagate inequalities.
        addAdditionalUser(OtherOp, &CB);
        mergeInValue(IV, &CB,
                     ValueLatticeElement::getNot(CondVal.getConstant()));
        return;
      }

      // The predicate gave us nothing; fall back to the copied value's state.
      return (void)mergeInValue(IV, &CB, CopyOfVal);
    }

    if (II->getIntrinsicID() == Intrinsic::vscale) {
      // vscale's result range comes from function attributes.
      unsigned BitWidth = CB.getType()->getScalarSizeInBits();
      const ConstantRange Result = getVScaleRange(II->getFunction(), BitWidth);
      return (void)mergeInValue(II, ValueLatticeElement::getRange(Result));
    }

    if (ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
      // Compute result range for intrinsics supported by ConstantRange.
      // Do this even if we don't know a range for all operands, as we may
      // still know something about the result range, e.g. of abs(x).
      SmallVector<ConstantRange, 2> OpRanges;
      for (Value *Op : II->args()) {
        const ValueLatticeElement &State = getValueState(Op);
        if (State.isUnknownOrUndef())
          return;
        OpRanges.push_back(
            State.asConstantRange(Op->getType(), /*UndefAllowed=*/false));
      }

      ConstantRange Result =
          ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges);
      return (void)mergeInValue(II, ValueLatticeElement::getRange(Result));
    }
  }

  // The common case is that we aren't tracking the callee, either because we
  // are not doing interprocedural analysis or the callee is indirect, or is
  // external.  Handle these cases first.
  if (!F || F->isDeclaration())
    return handleCallOverdefined(CB);

  // If this is a single/zero retval case, see if we're tracking the function.
  if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
    if (!MRVFunctionsTracked.count(F))
      return handleCallOverdefined(CB); // Not tracking this callee.

    // If we are tracking this callee, propagate the result of the function
    // into this call site.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
      mergeInValue(getStructValueState(&CB, i), &CB,
                   TrackedMultipleRetVals[std::make_pair(F, i)],
                   getMaxWidenStepsOpts());
  } else {
    auto TFRVI = TrackedRetVals.find(F);
    if (TFRVI == TrackedRetVals.end())
      return handleCallOverdefined(CB); // Not tracking this callee.

    // If so, propagate the return value of the callee into this call result.
    mergeInValue(&CB, TFRVI->second, getMaxWidenStepsOpts());
  }
}
2029 
// Run the solver to a fixed point: drain the instruction work list, then the
// basic-block work list, repeating until both are empty.
void SCCPInstVisitor::solve() {
  // Process the work lists until they are empty!
  while (!BBWorkList.empty() || !InstWorkList.empty()) {
    // Process the instruction work list.
    while (!InstWorkList.empty()) {
      Instruction *I = InstWorkList.pop_back_val();
      // I is being re-evaluated now, so any recorded invalidation of its
      // lattice value is stale.
      Invalidated.erase(I);

      LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');

      // Recompute the lattice state of I via the visitor dispatch above.
      visit(I);
    }

    // Process the basic block work list.
    while (!BBWorkList.empty()) {
      BasicBlock *BB = BBWorkList.pop_back_val();
      BBVisited.insert(BB);

      LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');
      // First sweep over a newly executable block: visit every instruction.
      // NOTE(review): CurI appears to record the instruction currently being
      // visited during this sweep — confirm against the consumers of CurI
      // elsewhere in this file.
      for (Instruction &I : *BB) {
        CurI = &I;
        visit(I);
      }
      CurI = nullptr;
    }
  }
}
2057 
// Force a post-solve result for a single instruction that is still unknown
// (see the comment on resolvedUndefsIn). Returns true if a state changed.
bool SCCPInstVisitor::resolvedUndef(Instruction &I) {
  // Look for instructions which produce undef values.
  if (I.getType()->isVoidTy())
    return false;

  if (auto *STy = dyn_cast<StructType>(I.getType())) {
    // Only a few things that can be structs matter for undef.

    // Tracked calls must never be marked overdefined in resolvedUndefsIn.
    if (auto *CB = dyn_cast<CallBase>(&I))
      if (Function *F = CB->getCalledFunction())
        if (MRVFunctionsTracked.count(F))
          return false;

    // extractvalue and insertvalue don't need to be marked; they are
    // tracked as precisely as their operands.
    if (isa<ExtractValueInst>(I) || isa<InsertValueInst>(I))
      return false;
    // Send the results of everything else to overdefined.  We could be
    // more precise than this but it isn't worth bothering.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      ValueLatticeElement &LV = getStructValueState(&I, i);
      if (LV.isUnknown()) {
        markOverdefined(LV, &I);
        return true;
      }
    }
    return false;
  }

  ValueLatticeElement &LV = getValueState(&I);
  // Only instructions still at the lattice top need a forced resolution.
  if (!LV.isUnknown())
    return false;

  // There are two reasons a call can have an undef result
  // 1. It could be tracked.
  // 2. It could be constant-foldable.
  // Because of the way we solve return values, tracked calls must
  // never be marked overdefined in resolvedUndefsIn.
  if (auto *CB = dyn_cast<CallBase>(&I))
    if (Function *F = CB->getCalledFunction())
      if (TrackedRetVals.count(F))
        return false;

  if (isa<LoadInst>(I)) {
    // A load here means one of two things: a load of undef from a global,
    // a load from an unknown pointer.  Either way, having it return undef
    // is okay.
    return false;
  }

  markOverdefined(&I);
  return true;
}
2112 
2113 /// While solving the dataflow for a function, we don't compute a result for
2114 /// operations with an undef operand, to allow undef to be lowered to a
2115 /// constant later. For example, constant folding of "zext i8 undef to i16"
2116 /// would result in "i16 0", and if undef is later lowered to "i8 1", then the
2117 /// zext result would become "i16 1" and would result into an overdefined
2118 /// lattice value once merged with the previous result. Not computing the
2119 /// result of the zext (treating undef the same as unknown) allows us to handle
2120 /// a later undef->constant lowering more optimally.
2121 ///
2122 /// However, if the operand remains undef when the solver returns, we do need
2123 /// to assign some result to the instruction (otherwise we would treat it as
2124 /// unreachable). For simplicity, we mark any instructions that are still
2125 /// unknown as overdefined.
2126 bool SCCPInstVisitor::resolvedUndefsIn(Function &F) {
2127   bool MadeChange = false;
2128   for (BasicBlock &BB : F) {
2129     if (!BBExecutable.count(&BB))
2130       continue;
2131 
2132     for (Instruction &I : BB)
2133       MadeChange |= resolvedUndef(I);
2134   }
2135 
2136   LLVM_DEBUG(if (MadeChange) dbgs()
2137              << "\nResolved undefs in " << F.getName() << '\n');
2138 
2139   return MadeChange;
2140 }
2141 
2142 //===----------------------------------------------------------------------===//
2143 //
2144 // SCCPSolver implementations
2145 //
2146 SCCPSolver::SCCPSolver(
2147     const DataLayout &DL,
2148     std::function<const TargetLibraryInfo &(Function &)> GetTLI,
2149     LLVMContext &Ctx)
2150     : Visitor(new SCCPInstVisitor(DL, std::move(GetTLI), Ctx)) {}
2151 
2152 SCCPSolver::~SCCPSolver() = default;
2153 
2154 void SCCPSolver::addPredicateInfo(Function &F, DominatorTree &DT,
2155                                   AssumptionCache &AC) {
2156   Visitor->addPredicateInfo(F, DT, AC);
2157 }
2158 
2159 void SCCPSolver::removeSSACopies(Function &F) {
2160   Visitor->removeSSACopies(F);
2161 }
2162 
2163 bool SCCPSolver::markBlockExecutable(BasicBlock *BB) {
2164   return Visitor->markBlockExecutable(BB);
2165 }
2166 
2167 const PredicateBase *SCCPSolver::getPredicateInfoFor(Instruction *I) {
2168   return Visitor->getPredicateInfoFor(I);
2169 }
2170 
2171 void SCCPSolver::trackValueOfGlobalVariable(GlobalVariable *GV) {
2172   Visitor->trackValueOfGlobalVariable(GV);
2173 }
2174 
2175 void SCCPSolver::addTrackedFunction(Function *F) {
2176   Visitor->addTrackedFunction(F);
2177 }
2178 
2179 void SCCPSolver::addToMustPreserveReturnsInFunctions(Function *F) {
2180   Visitor->addToMustPreserveReturnsInFunctions(F);
2181 }
2182 
2183 bool SCCPSolver::mustPreserveReturn(Function *F) {
2184   return Visitor->mustPreserveReturn(F);
2185 }
2186 
2187 void SCCPSolver::addArgumentTrackedFunction(Function *F) {
2188   Visitor->addArgumentTrackedFunction(F);
2189 }
2190 
2191 bool SCCPSolver::isArgumentTrackedFunction(Function *F) {
2192   return Visitor->isArgumentTrackedFunction(F);
2193 }
2194 
2195 const SmallPtrSetImpl<Function *> &
2196 SCCPSolver::getArgumentTrackedFunctions() const {
2197   return Visitor->getArgumentTrackedFunctions();
2198 }
2199 
// Solver drivers and lattice-query wrappers, all forwarding to the private
// SCCPInstVisitor implementation.
void SCCPSolver::solve() { Visitor->solve(); }

bool SCCPSolver::resolvedUndefsIn(Function &F) {
  return Visitor->resolvedUndefsIn(F);
}

void SCCPSolver::solveWhileResolvedUndefsIn(Module &M) {
  Visitor->solveWhileResolvedUndefsIn(M);
}

void
SCCPSolver::solveWhileResolvedUndefsIn(SmallVectorImpl<Function *> &WorkList) {
  Visitor->solveWhileResolvedUndefsIn(WorkList);
}

void SCCPSolver::solveWhileResolvedUndefs() {
  Visitor->solveWhileResolvedUndefs();
}

bool SCCPSolver::isBlockExecutable(BasicBlock *BB) const {
  return Visitor->isBlockExecutable(BB);
}

bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) const {
  return Visitor->isEdgeFeasible(From, To);
}

std::vector<ValueLatticeElement>
SCCPSolver::getStructLatticeValueFor(Value *V) const {
  return Visitor->getStructLatticeValueFor(V);
}

void SCCPSolver::removeLatticeValueFor(Value *V) {
  return Visitor->removeLatticeValueFor(V);
}

void SCCPSolver::resetLatticeValueFor(CallBase *Call) {
  Visitor->resetLatticeValueFor(Call);
}

const ValueLatticeElement &SCCPSolver::getLatticeValueFor(Value *V) const {
  return Visitor->getLatticeValueFor(V);
}

const MapVector<Function *, ValueLatticeElement> &
SCCPSolver::getTrackedRetVals() const {
  return Visitor->getTrackedRetVals();
}

const DenseMap<GlobalVariable *, ValueLatticeElement> &
SCCPSolver::getTrackedGlobals() const {
  return Visitor->getTrackedGlobals();
}

const SmallPtrSet<Function *, 16> &SCCPSolver::getMRVFunctionsTracked() const {
  return Visitor->getMRVFunctionsTracked();
}
2257 
// Lattice-mutation and visitation wrappers, forwarding to the private
// SCCPInstVisitor implementation.
void SCCPSolver::markOverdefined(Value *V) { Visitor->markOverdefined(V); }

void SCCPSolver::trackValueOfArgument(Argument *V) {
  Visitor->trackValueOfArgument(V);
}

bool SCCPSolver::isStructLatticeConstant(Function *F, StructType *STy) {
  return Visitor->isStructLatticeConstant(F, STy);
}

Constant *SCCPSolver::getConstant(const ValueLatticeElement &LV,
                                  Type *Ty) const {
  return Visitor->getConstant(LV, Ty);
}

Constant *SCCPSolver::getConstantOrNull(Value *V) const {
  return Visitor->getConstantOrNull(V);
}

void SCCPSolver::setLatticeValueForSpecializationArguments(Function *F,
                                   const SmallVectorImpl<ArgInfo> &Args) {
  Visitor->setLatticeValueForSpecializationArguments(F, Args);
}

void SCCPSolver::markFunctionUnreachable(Function *F) {
  Visitor->markFunctionUnreachable(F);
}

void SCCPSolver::visit(Instruction *I) { Visitor->visit(I); }

void SCCPSolver::visitCall(CallInst &I) { Visitor->visitCall(I); }
2289