xref: /freebsd/contrib/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp (revision 1342eb5a832fa10e689a29faab3acb6054e4778c)
1 //===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interface for lazy computation of value constraint
10 // information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/LazyValueInfo.h"
15 #include "llvm/ADT/DenseSet.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/Analysis/AssumptionCache.h"
18 #include "llvm/Analysis/ConstantFolding.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/Passes.h"
21 #include "llvm/Analysis/TargetLibraryInfo.h"
22 #include "llvm/Analysis/ValueLattice.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/IR/AssemblyAnnotationWriter.h"
25 #include "llvm/IR/CFG.h"
26 #include "llvm/IR/ConstantRange.h"
27 #include "llvm/IR/Constants.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/Dominators.h"
30 #include "llvm/IR/InstrTypes.h"
31 #include "llvm/IR/Instructions.h"
32 #include "llvm/IR/IntrinsicInst.h"
33 #include "llvm/IR/Intrinsics.h"
34 #include "llvm/IR/LLVMContext.h"
35 #include "llvm/IR/Module.h"
36 #include "llvm/IR/PatternMatch.h"
37 #include "llvm/IR/ValueHandle.h"
38 #include "llvm/InitializePasses.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/FormattedStream.h"
41 #include "llvm/Support/KnownBits.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include <optional>
44 using namespace llvm;
45 using namespace PatternMatch;
46 
47 #define DEBUG_TYPE "lazy-value-info"
48 
49 // This is the number of worklist items we will process to try to discover an
50 // answer for a given value.
51 static const unsigned MaxProcessedPerValue = 500;
52 
// Legacy pass manager registration. The dependency lines below declare that
// this analysis requires AssumptionCacheTracker and TargetLibraryInfo.
53 char LazyValueInfoWrapperPass::ID = 0;
54 LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) {}
55 INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
56                 "Lazy Value Information Analysis", false, true)
57 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
58 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
59 INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
60                 "Lazy Value Information Analysis", false, true)
61 
62 namespace llvm {
/// Factory for the legacy-PM wrapper around the lazy value info analysis.
63 FunctionPass *createLazyValueInfoPass() {
64   return new LazyValueInfoWrapperPass();
65 }
66 } // namespace llvm
67 
// Unique key identifying LazyValueAnalysis to the new pass manager.
68 AnalysisKey LazyValueAnalysis::Key;
69 
70 /// Returns true if this lattice value represents at most one possible value.
71 /// This is as precise as any lattice value can get while still representing
72 /// reachable code.
73 static bool hasSingleValue(const ValueLatticeElement &Val) {
74   if (Val.isConstantRange() &&
75       Val.getConstantRange().isSingleElement())
76     // Integer constants are single element ranges
77     return true;
78   if (Val.isConstant())
79     // Non integer constants
80     return true;
81   return false;
82 }
83 
84 //===----------------------------------------------------------------------===//
85 //                          LazyValueInfoCache Decl
86 //===----------------------------------------------------------------------===//
87 
88 namespace {
89   /// A callback value handle updates the cache when values are erased.
90   class LazyValueInfoCache;
91   struct LVIValueHandle final : public CallbackVH {
92     LazyValueInfoCache *Parent;
93 
94     LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
95       : CallbackVH(V), Parent(P) { }
96 
97     void deleted() override;
98     void allUsesReplacedWith(Value *V) override {
99       deleted();
100     }
101   };
102 } // end anonymous namespace
103 
104 namespace {
105 using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
106 
107 /// This is the cache kept by LazyValueInfo which
108 /// maintains information about queries across the clients' queries.
109 class LazyValueInfoCache {
110   /// This is all of the cached information for one basic block. It contains
111   /// the per-value lattice elements, as well as a separate set for
112   /// overdefined values to reduce memory usage. Additionally pointers
113   /// dereferenced in the block are cached for nullability queries.
114   struct BlockCacheEntry {
115     SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
116     SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
117     // std::nullopt indicates that the nonnull pointers for this basic block
118     // block have not been computed yet.
119     std::optional<NonNullPointerSet> NonNullPointers;
120   };
121 
122   /// Cached information per basic block.
123   DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
124       BlockCache;
125   /// Set of value handles used to erase values from the cache on deletion.
126   DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
127 
  /// Returns the cache entry for \p BB, or nullptr if no result has ever
  /// been cached for this block.
128   const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
129     auto It = BlockCache.find_as(BB);
130     if (It == BlockCache.end())
131       return nullptr;
132     return It->second.get();
133   }
134 
  /// Returns the cache entry for \p BB, creating an empty one on first use.
135   BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
136     auto It = BlockCache.find_as(BB);
137     if (It == BlockCache.end())
138       It = BlockCache.insert({BB, std::make_unique<BlockCacheEntry>()}).first;
139 
140     return It->second.get();
141   }
142 
  /// Registers a callback handle for \p Val (at most one per value) so that
  /// cached results are purged when the value is deleted or RAUW'd.
143   void addValueHandle(Value *Val) {
144     auto HandleIt = ValueHandles.find_as(Val);
145     if (HandleIt == ValueHandles.end())
146       ValueHandles.insert({Val, this});
147   }
148 
149 public:
  /// Caches \p Result as the lattice value for \p Val at the end of \p BB.
150   void insertResult(Value *Val, BasicBlock *BB,
151                     const ValueLatticeElement &Result) {
152     BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
153 
154     // Insert over-defined values into their own cache to reduce memory
155     // overhead.
156     if (Result.isOverdefined())
157       Entry->OverDefined.insert(Val);
158     else
159       Entry->LatticeElements.insert({Val, Result});
160 
161     addValueHandle(Val);
162   }
163 
  /// Returns the cached lattice value for \p V in \p BB, or std::nullopt if
  /// nothing has been cached for this (value, block) pair.
164   std::optional<ValueLatticeElement> getCachedValueInfo(Value *V,
165                                                         BasicBlock *BB) const {
166     const BlockCacheEntry *Entry = getBlockEntry(BB);
167     if (!Entry)
168       return std::nullopt;
169 
170     if (Entry->OverDefined.count(V))
171       return ValueLatticeElement::getOverdefined();
172 
173     auto LatticeIt = Entry->LatticeElements.find_as(V);
174     if (LatticeIt == Entry->LatticeElements.end())
175       return std::nullopt;
176 
177     return LatticeIt->second;
178   }
179 
  /// Returns true if \p V is in \p BB's nonnull-pointer set, computing the
  /// set lazily via \p InitFn on the first query for this block.
180   bool
181   isNonNullAtEndOfBlock(Value *V, BasicBlock *BB,
182                         function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
183     BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
184     if (!Entry->NonNullPointers) {
185       Entry->NonNullPointers = InitFn(BB);
186       for (Value *V : *Entry->NonNullPointers)
187         addValueHandle(V);
188     }
189 
190     return Entry->NonNullPointers->count(V);
191   }
192 
193   /// clear - Empty the cache.
194   void clear() {
195     BlockCache.clear();
196     ValueHandles.clear();
197   }
198 
199   /// Inform the cache that a given value has been deleted.
200   void eraseValue(Value *V);
201 
202   /// This is part of the update interface to inform the cache
203   /// that a block has been deleted.
204   void eraseBlock(BasicBlock *BB);
205 
206   /// Updates the cache to remove any influence an overdefined value in
207   /// OldSucc might have (unless also overdefined in NewSucc).  This just
208   /// flushes elements from the cache and does not add any.
209   void threadEdgeImpl(BasicBlock *OldSucc, BasicBlock *NewSucc);
210 };
211 } // namespace
212 
213 void LazyValueInfoCache::eraseValue(Value *V) {
214   for (auto &Pair : BlockCache) {
215     Pair.second->LatticeElements.erase(V);
216     Pair.second->OverDefined.erase(V);
217     if (Pair.second->NonNullPointers)
218       Pair.second->NonNullPointers->erase(V);
219   }
220 
221   auto HandleIt = ValueHandles.find_as(V);
222   if (HandleIt != ValueHandles.end())
223     ValueHandles.erase(HandleIt);
224 }
225 
// CallbackVH override: the tracked Value is being destroyed, so scrub every
// cache entry that still refers to it.
226 void LVIValueHandle::deleted() {
227   // This erasure deallocates *this, so it MUST happen after we're done
228   // using any and all members of *this.
229   Parent->eraseValue(*this);
230 }
231 
// Drops the entire cache entry for BB; the unique_ptr in the map releases the
// per-block storage.
232 void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
233   BlockCache.erase(BB);
234 }
235 
236 void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
237                                         BasicBlock *NewSucc) {
238   // When an edge in the graph has been threaded, values that we could not
239   // determine a value for before (i.e. were marked overdefined) may be
240   // possible to solve now. We do NOT try to proactively update these values.
241   // Instead, we clear their entries from the cache, and allow lazy updating to
242   // recompute them when needed.
243 
244   // The updating process is fairly simple: we need to drop cached info
245   // for all values that were marked overdefined in OldSucc, and for those same
246   // values in any successor of OldSucc (except NewSucc) in which they were
247   // also marked overdefined.
248   std::vector<BasicBlock*> worklist;
249   worklist.push_back(OldSucc);
250 
251   const BlockCacheEntry *Entry = getBlockEntry(OldSucc);
252   if (!Entry || Entry->OverDefined.empty())
253     return; // Nothing to process here.
254   SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
255                                       Entry->OverDefined.end());
256 
257   // Use a worklist to perform a depth-first search of OldSucc's successors.
258   // NOTE: We do not need a visited list since any blocks we have already
259   // visited will have had their overdefined markers cleared already, and we
260   // thus won't loop to their successors.
261   while (!worklist.empty()) {
262     BasicBlock *ToUpdate = worklist.back();
263     worklist.pop_back();
264 
265     // Skip blocks only accessible through NewSucc.
266     if (ToUpdate == NewSucc) continue;
267 
268     // If a value was marked overdefined in OldSucc, and is here too...
269     auto OI = BlockCache.find_as(ToUpdate);
270     if (OI == BlockCache.end() || OI->second->OverDefined.empty())
271       continue;
272     auto &ValueSet = OI->second->OverDefined;
273 
274     bool changed = false;
275     for (Value *V : ValsToClear) {
276       if (!ValueSet.erase(V))
277         continue;
278 
279       // If we removed anything, then we potentially need to update
280       // blocks successors too.
281       changed = true;
282     }
283 
284     if (!changed) continue;
285 
286     llvm::append_range(worklist, successors(ToUpdate));
287   }
288 }
289 
290 namespace llvm {
291 namespace {
292 /// An assembly annotator class to print LazyValueCache information in
293 /// comments.
294 class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
295   LazyValueInfoImpl *LVIImpl;
296   // While analyzing which blocks we can solve values for, we need the dominator
297   // information.
298   DominatorTree &DT;
299 
300 public:
301   LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
302       : LVIImpl(L), DT(DTree) {}
303 
  // AssemblyAnnotationWriter overrides; bodies are defined elsewhere in this
  // file (not visible in this chunk).
304   void emitBasicBlockStartAnnot(const BasicBlock *BB,
305                                 formatted_raw_ostream &OS) override;
306 
307   void emitInstructionAnnot(const Instruction *I,
308                             formatted_raw_ostream &OS) override;
309 };
310 } // namespace
311 // The actual implementation of the lazy analysis and update.
312 class LazyValueInfoImpl {
313 
314   /// Cached results from previous queries
315   LazyValueInfoCache TheCache;
316 
317   /// This stack holds the state of the value solver during a query.
318   /// It basically emulates the callstack of the naive
319   /// recursive value lookup process.
320   SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;
321 
322   /// Keeps track of which block-value pairs are in BlockValueStack.
323   DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;
324 
325   /// Push BV onto BlockValueStack unless it's already in there.
326   /// Returns true on success.
327   bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
328     if (!BlockValueSet.insert(BV).second)
329       return false;  // It's already in the stack.
330 
331     LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
332                       << BV.first->getName() << "\n");
333     BlockValueStack.push_back(BV);
334     return true;
335   }
336 
337   AssumptionCache *AC;  ///< A pointer to the cache of @llvm.assume calls.
338   const DataLayout &DL; ///< A mandatory DataLayout
339 
340   /// Declaration of the llvm.experimental.guard() intrinsic,
341   /// if it exists in the module.
342   Function *GuardDecl;
343 
344   std::optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
345                                                    Instruction *CxtI);
346   std::optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
347                                                   BasicBlock *T,
348                                                   Instruction *CxtI = nullptr);
349 
350   // These methods process one work item and may add more. A false value
351   // returned means that the work item was not completely processed and must
352   // be revisited after going through the new items.
353   bool solveBlockValue(Value *Val, BasicBlock *BB);
354   std::optional<ValueLatticeElement> solveBlockValueImpl(Value *Val,
355                                                          BasicBlock *BB);
356   std::optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
357                                                              BasicBlock *BB);
358   std::optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
359                                                             BasicBlock *BB);
360   std::optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
361                                                            BasicBlock *BB);
362   std::optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
363                                            BasicBlock *BB);
364   std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
365       Instruction *I, BasicBlock *BB,
366       std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
367           OpFn);
368   std::optional<ValueLatticeElement>
369   solveBlockValueBinaryOp(BinaryOperator *BBI, BasicBlock *BB);
370   std::optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
371                                                          BasicBlock *BB);
372   std::optional<ValueLatticeElement>
373   solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, BasicBlock *BB);
374   std::optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
375                                                               BasicBlock *BB);
376   std::optional<ValueLatticeElement>
377   solveBlockValueInsertElement(InsertElementInst *IEI, BasicBlock *BB);
378   std::optional<ValueLatticeElement>
379   solveBlockValueExtractValue(ExtractValueInst *EVI, BasicBlock *BB);
380   bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
381   void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
382                                                      ValueLatticeElement &BBLV,
383                                                      Instruction *BBI);
384 
  /// Drains BlockValueStack, resolving each queued (block, value) pair and
  /// caching its result (see definition below for the throttling behavior).
385   void solve();
386 
387   // For the following methods, if UseBlockValue is true, the function may
388   // push additional values to the worklist and return nullopt. If
389   // UseBlockValue is false, it will never return nullopt.
390 
391   std::optional<ValueLatticeElement>
392   getValueFromSimpleICmpCondition(CmpInst::Predicate Pred, Value *RHS,
393                                   const APInt &Offset, Instruction *CxtI,
394                                   bool UseBlockValue);
395 
396   std::optional<ValueLatticeElement>
397   getValueFromICmpCondition(Value *Val, ICmpInst *ICI, bool isTrueDest,
398                             bool UseBlockValue);
399   ValueLatticeElement getValueFromTrunc(Value *Val, TruncInst *Trunc,
400                                         bool IsTrueDest);
401 
402   std::optional<ValueLatticeElement>
403   getValueFromCondition(Value *Val, Value *Cond, bool IsTrueDest,
404                         bool UseBlockValue, unsigned Depth = 0);
405 
406   std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
407                                                        BasicBlock *BBFrom,
408                                                        BasicBlock *BBTo,
409                                                        bool UseBlockValue);
410 
411 public:
412   /// This is the query interface to determine the lattice value for the
413   /// specified Value* at the context instruction (if specified) or at the
414   /// start of the block.
415   ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
416                                       Instruction *CxtI = nullptr);
417 
418   /// This is the query interface to determine the lattice value for the
419   /// specified Value* at the specified instruction using only information
420   /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
421   /// recursive query is performed.
422   ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);
423 
424   /// This is the query interface to determine the lattice
425   /// value for the specified Value* that is true on the specified edge.
426   ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
427                                      BasicBlock *ToBB,
428                                      Instruction *CxtI = nullptr);
429 
430   ValueLatticeElement getValueAtUse(const Use &U);
431 
432   /// Complete flush all previously computed values
433   void clear() {
434     TheCache.clear();
435   }
436 
437   /// Printing the LazyValueInfo Analysis.
438   void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
439     LazyValueInfoAnnotatedWriter Writer(this, DTree);
440     F.print(OS, &Writer);
441   }
442 
443   /// This is part of the update interface to remove information related to this
444   /// value from the cache.
445   void forgetValue(Value *V) { TheCache.eraseValue(V); }
446 
447   /// This is part of the update interface to inform the cache
448   /// that a block has been deleted.
449   void eraseBlock(BasicBlock *BB) {
450     TheCache.eraseBlock(BB);
451   }
452 
453   /// This is the update interface to inform the cache that an edge from
454   /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
455   void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
456 
  /// \p GuardDecl may be null when the module declares no
  /// llvm.experimental.guard intrinsic.
457   LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
458                     Function *GuardDecl)
459       : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
460 };
461 } // namespace llvm
462 
463 void LazyValueInfoImpl::solve() {
  // Snapshot the queries that initiated this solve; if we give up due to the
  // throttle below, these (and only these) get pessimistic results cached.
464   SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack =
465       BlockValueStack;
466 
467   unsigned processedCount = 0;
468   while (!BlockValueStack.empty()) {
469     processedCount++;
470     // Abort if we have to process too many values to get a result for this one.
471     // Because of the design of the overdefined cache currently being per-block
472     // to avoid naming-related issues (IE it wants to try to give different
473     // results for the same name in different blocks), overdefined results don't
474     // get cached globally, which in turn means we will often try to rediscover
475     // the same overdefined result again and again.  Once something like
476     // PredicateInfo is used in LVI or CVP, we should be able to make the
477     // overdefined cache global, and remove this throttle.
478     if (processedCount > MaxProcessedPerValue) {
479       LLVM_DEBUG(
480           dbgs() << "Giving up on stack because we are getting too deep\n");
481       // Fill in the original values
482       while (!StartingStack.empty()) {
483         std::pair<BasicBlock *, Value *> &e = StartingStack.back();
484         TheCache.insertResult(e.second, e.first,
485                               ValueLatticeElement::getOverdefined());
486         StartingStack.pop_back();
487       }
488       BlockValueSet.clear();
489       BlockValueStack.clear();
490       return;
491     }
    // Peek (don't pop) the top work item: solveBlockValue may push
    // dependencies that must be resolved before this item can complete.
492     std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
493     assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
494     unsigned StackSize = BlockValueStack.size();
495     (void) StackSize;
496 
497     if (solveBlockValue(e.second, e.first)) {
498       // The work item was completely processed.
499       assert(BlockValueStack.size() == StackSize &&
500              BlockValueStack.back() == e && "Nothing should have been pushed!");
501 #ifndef NDEBUG
502       std::optional<ValueLatticeElement> BBLV =
503           TheCache.getCachedValueInfo(e.second, e.first);
504       assert(BBLV && "Result should be in cache!");
505       LLVM_DEBUG(
506           dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
507                  << *BBLV << "\n");
508 #endif
509 
510       BlockValueStack.pop_back();
511       BlockValueSet.erase(e);
512     } else {
513       // More work needs to be done before revisiting.
514       assert(BlockValueStack.size() == StackSize + 1 &&
515              "Exactly one element should have been pushed!");
516     }
517   }
518 }
519 
520 std::optional<ValueLatticeElement>
521 LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB,
522                                  Instruction *CxtI) {
523   // If already a constant, there is nothing to compute.
524   if (Constant *VC = dyn_cast<Constant>(Val))
525     return ValueLatticeElement::get(VC);
526 
527   if (std::optional<ValueLatticeElement> OptLatticeVal =
528           TheCache.getCachedValueInfo(Val, BB)) {
529     intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
530     return OptLatticeVal;
531   }
532 
533   // We have hit a cycle, assume overdefined.
534   if (!pushBlockValue({ BB, Val }))
535     return ValueLatticeElement::getOverdefined();
536 
537   // Yet to be resolved.
538   return std::nullopt;
539 }
540 
541 static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
542   switch (BBI->getOpcode()) {
543   default:
544     break;
545   case Instruction::Call:
546   case Instruction::Invoke:
547     if (std::optional<ConstantRange> Range = cast<CallBase>(BBI)->getRange())
548       return ValueLatticeElement::getRange(*Range);
549     [[fallthrough]];
550   case Instruction::Load:
551     if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
552       if (isa<IntegerType>(BBI->getType())) {
553         return ValueLatticeElement::getRange(
554             getConstantRangeFromMetadata(*Ranges));
555       }
556     break;
557   };
558   // Nothing known - will be intersected with other facts
559   return ValueLatticeElement::getOverdefined();
560 }
561 
562 bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
563   assert(!isa<Constant>(Val) && "Value should not be constant");
564   assert(!TheCache.getCachedValueInfo(Val, BB) &&
565          "Value should not be in cache");
566 
567   // Hold off inserting this value into the Cache in case we have to return
568   // false and come back later.
569   std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
570   if (!Res)
571     // Work pushed, will revisit
572     return false;
573 
574   TheCache.insertResult(Val, BB, *Res);
575   return true;
576 }
577 
578 std::optional<ValueLatticeElement>
579 LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
580   Instruction *BBI = dyn_cast<Instruction>(Val);
581   if (!BBI || BBI->getParent() != BB)
582     return solveBlockValueNonLocal(Val, BB);
583 
584   if (PHINode *PN = dyn_cast<PHINode>(BBI))
585     return solveBlockValuePHINode(PN, BB);
586 
587   if (auto *SI = dyn_cast<SelectInst>(BBI))
588     return solveBlockValueSelect(SI, BB);
589 
590   // If this value is a nonnull pointer, record it's range and bailout.  Note
591   // that for all other pointer typed values, we terminate the search at the
592   // definition.  We could easily extend this to look through geps, bitcasts,
593   // and the like to prove non-nullness, but it's not clear that's worth it
594   // compile time wise.  The context-insensitive value walk done inside
595   // isKnownNonZero gets most of the profitable cases at much less expense.
596   // This does mean that we have a sensitivity to where the defining
597   // instruction is placed, even if it could legally be hoisted much higher.
598   // That is unfortunate.
599   PointerType *PT = dyn_cast<PointerType>(BBI->getType());
600   if (PT && isKnownNonZero(BBI, DL))
601     return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
602 
603   if (BBI->getType()->isIntOrIntVectorTy()) {
604     if (auto *CI = dyn_cast<CastInst>(BBI))
605       return solveBlockValueCast(CI, BB);
606 
607     if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
608       return solveBlockValueBinaryOp(BO, BB);
609 
610     if (auto *IEI = dyn_cast<InsertElementInst>(BBI))
611       return solveBlockValueInsertElement(IEI, BB);
612 
613     if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
614       return solveBlockValueExtractValue(EVI, BB);
615 
616     if (auto *II = dyn_cast<IntrinsicInst>(BBI))
617       return solveBlockValueIntrinsic(II, BB);
618   }
619 
620   LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
621                     << "' - unknown inst def found.\n");
622   return getFromRangeMetadata(BBI);
623 }
624 
625 static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet,
626                               bool IsDereferenced = true) {
627   // TODO: Use NullPointerIsDefined instead.
628   if (Ptr->getType()->getPointerAddressSpace() == 0)
629     PtrSet.insert(IsDereferenced ? getUnderlyingObject(Ptr)
630                                  : Ptr->stripInBoundsOffsets());
631 }
632 
633 static void AddNonNullPointersByInstruction(
634     Instruction *I, NonNullPointerSet &PtrSet) {
635   if (LoadInst *L = dyn_cast<LoadInst>(I)) {
636     AddNonNullPointer(L->getPointerOperand(), PtrSet);
637   } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
638     AddNonNullPointer(S->getPointerOperand(), PtrSet);
639   } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
640     if (MI->isVolatile()) return;
641 
642     // FIXME: check whether it has a valuerange that excludes zero?
643     ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
644     if (!Len || Len->isZero()) return;
645 
646     AddNonNullPointer(MI->getRawDest(), PtrSet);
647     if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
648       AddNonNullPointer(MTI->getRawSource(), PtrSet);
649   } else if (auto *CB = dyn_cast<CallBase>(I)) {
650     for (auto &U : CB->args()) {
651       if (U->getType()->isPointerTy() &&
652           CB->paramHasNonNullAttr(CB->getArgOperandNo(&U),
653                                   /*AllowUndefOrPoison=*/false))
654         AddNonNullPointer(U.get(), PtrSet, /*IsDereferenced=*/false);
655     }
656   }
657 }
658 
659 bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
660   if (NullPointerIsDefined(BB->getParent(),
661                            Val->getType()->getPointerAddressSpace()))
662     return false;
663 
664   Val = Val->stripInBoundsOffsets();
665   return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
666     NonNullPointerSet NonNullPointers;
667     for (Instruction &I : *BB)
668       AddNonNullPointersByInstruction(&I, NonNullPointers);
669     return NonNullPointers;
670   });
671 }
672 
673 std::optional<ValueLatticeElement>
674 LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) {
675   ValueLatticeElement Result;  // Start Undefined.
676 
677   // If this is the entry block, we must be asking about an argument.
678   if (BB->isEntryBlock()) {
679     assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
680     if (std::optional<ConstantRange> Range = cast<Argument>(Val)->getRange())
681       return ValueLatticeElement::getRange(*Range);
682     return ValueLatticeElement::getOverdefined();
683   }
684 
685   // Loop over all of our predecessors, merging what we know from them into
686   // result.  If we encounter an unexplored predecessor, we eagerly explore it
687   // in a depth first manner.  In practice, this has the effect of discovering
688   // paths we can't analyze eagerly without spending compile times analyzing
689   // other paths.  This heuristic benefits from the fact that predecessors are
690   // frequently arranged such that dominating ones come first and we quickly
691   // find a path to function entry.  TODO: We should consider explicitly
692   // canonicalizing to make this true rather than relying on this happy
693   // accident.
694   for (BasicBlock *Pred : predecessors(BB)) {
695     // Skip self loops.
696     if (Pred == BB)
697       continue;
698     std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
699     if (!EdgeResult)
700       // Explore that input, then return here
701       return std::nullopt;
702 
703     Result.mergeIn(*EdgeResult);
704 
705     // If we hit overdefined, exit early.  The BlockVals entry is already set
706     // to overdefined.
707     if (Result.isOverdefined()) {
708       LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
709                         << "' - overdefined because of pred '"
710                         << Pred->getName() << "' (non local).\n");
711       return Result;
712     }
713   }
714 
715   // Return the merged value, which is more precise than 'overdefined'.
716   assert(!Result.isOverdefined());
717   return Result;
718 }
719 
720 std::optional<ValueLatticeElement>
721 LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) {
722   ValueLatticeElement Result;  // Start Undefined.
723 
724   // Loop over all of our predecessors, merging what we know from them into
725   // result.  See the comment about the chosen traversal order in
726   // solveBlockValueNonLocal; the same reasoning applies here.
727   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
728     BasicBlock *PhiBB = PN->getIncomingBlock(i);
729     Value *PhiVal = PN->getIncomingValue(i);
730     // Note that we can provide PN as the context value to getEdgeValue, even
731     // though the results will be cached, because PN is the value being used as
732     // the cache key in the caller.
733     std::optional<ValueLatticeElement> EdgeResult =
734         getEdgeValue(PhiVal, PhiBB, BB, PN);
735     if (!EdgeResult)
736       // Explore that input, then return here
737       return std::nullopt;
738 
739     Result.mergeIn(*EdgeResult);
740 
741     // If we hit overdefined, exit early.  The BlockVals entry is already set
742     // to overdefined.
743     if (Result.isOverdefined()) {
744       LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
745                         << "' - overdefined because of pred (local).\n");
746 
747       return Result;
748     }
749   }
750 
751   // Return the merged value, which is more precise than 'overdefined'.
752   assert(!Result.isOverdefined() && "Possible PHI in entry block?");
753   return Result;
754 }
755 
// If we can determine a constraint on the value given conditions assumed by
// the program, intersect those constraints with BBLV
void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
    Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
  // Default the context instruction to Val itself; without an instruction
  // context there is no block to scan for assumes/guards in.
  BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
  if (!BBI)
    return;

  BasicBlock *BB = BBI->getParent();
  for (auto &AssumeVH : AC->assumptionsFor(Val)) {
    // The assumption cache holds weak handles; skip entries whose assume
    // has been deleted.
    if (!AssumeVH)
      continue;

    // Only check assumes in the block of the context instruction. Other
    // assumes will have already been taken into account when the value was
    // propagated from predecessor blocks.
    auto *I = cast<CallInst>(AssumeVH);
    if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))
      continue;

    // Refine BBLV with what the assumed condition (argument 0 of the
    // llvm.assume call) implies about Val.
    BBLV = BBLV.intersect(*getValueFromCondition(Val, I->getArgOperand(0),
                                                 /*IsTrueDest*/ true,
                                                 /*UseBlockValue*/ false));
  }

  // If guards are not used in the module, don't spend time looking for them
  if (GuardDecl && !GuardDecl->use_empty() &&
      BBI->getIterator() != BB->begin()) {
    // Walk backwards from just above BBI to the start of the block; any
    // guard passed on the way must have held when BBI executes.
    for (Instruction &I :
         make_range(std::next(BBI->getIterator().getReverse()), BB->rend())) {
      Value *Cond = nullptr;
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
        BBLV = BBLV.intersect(*getValueFromCondition(Val, Cond,
                                                     /*IsTrueDest*/ true,
                                                     /*UseBlockValue*/ false));
    }
  }

  if (BBLV.isOverdefined()) {
    // Check whether we're checking at the terminator, and the pointer has
    // been dereferenced in this block.
    PointerType *PTy = dyn_cast<PointerType>(Val->getType());
    if (PTy && BB->getTerminator() == BBI &&
        isNonNullAtEndOfBlock(Val, BB))
      BBLV = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
  }
}
803 
804 std::optional<ValueLatticeElement>
805 LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
806   // Recurse on our inputs if needed
807   std::optional<ValueLatticeElement> OptTrueVal =
808       getBlockValue(SI->getTrueValue(), BB, SI);
809   if (!OptTrueVal)
810     return std::nullopt;
811   ValueLatticeElement &TrueVal = *OptTrueVal;
812 
813   std::optional<ValueLatticeElement> OptFalseVal =
814       getBlockValue(SI->getFalseValue(), BB, SI);
815   if (!OptFalseVal)
816     return std::nullopt;
817   ValueLatticeElement &FalseVal = *OptFalseVal;
818 
819   if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
820     const ConstantRange &TrueCR = TrueVal.asConstantRange(SI->getType());
821     const ConstantRange &FalseCR = FalseVal.asConstantRange(SI->getType());
822     Value *LHS = nullptr;
823     Value *RHS = nullptr;
824     SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
825     // Is this a min specifically of our two inputs?  (Avoid the risk of
826     // ValueTracking getting smarter looking back past our immediate inputs.)
827     if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
828         ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
829          (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
830       ConstantRange ResultCR = [&]() {
831         switch (SPR.Flavor) {
832         default:
833           llvm_unreachable("unexpected minmax type!");
834         case SPF_SMIN:                   /// Signed minimum
835           return TrueCR.smin(FalseCR);
836         case SPF_UMIN:                   /// Unsigned minimum
837           return TrueCR.umin(FalseCR);
838         case SPF_SMAX:                   /// Signed maximum
839           return TrueCR.smax(FalseCR);
840         case SPF_UMAX:                   /// Unsigned maximum
841           return TrueCR.umax(FalseCR);
842         };
843       }();
844       return ValueLatticeElement::getRange(
845           ResultCR, TrueVal.isConstantRangeIncludingUndef() ||
846                         FalseVal.isConstantRangeIncludingUndef());
847     }
848 
849     if (SPR.Flavor == SPF_ABS) {
850       if (LHS == SI->getTrueValue())
851         return ValueLatticeElement::getRange(
852             TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef());
853       if (LHS == SI->getFalseValue())
854         return ValueLatticeElement::getRange(
855             FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef());
856     }
857 
858     if (SPR.Flavor == SPF_NABS) {
859       ConstantRange Zero(APInt::getZero(TrueCR.getBitWidth()));
860       if (LHS == SI->getTrueValue())
861         return ValueLatticeElement::getRange(
862             Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
863       if (LHS == SI->getFalseValue())
864         return ValueLatticeElement::getRange(
865             Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef());
866     }
867   }
868 
869   // Can we constrain the facts about the true and false values by using the
870   // condition itself?  This shows up with idioms like e.g. select(a > 5, a, 5).
871   // TODO: We could potentially refine an overdefined true value above.
872   Value *Cond = SI->getCondition();
873   // If the value is undef, a different value may be chosen in
874   // the select condition.
875   if (isGuaranteedNotToBeUndef(Cond, AC)) {
876     TrueVal =
877         TrueVal.intersect(*getValueFromCondition(SI->getTrueValue(), Cond,
878                                                  /*IsTrueDest*/ true,
879                                                  /*UseBlockValue*/ false));
880     FalseVal =
881         FalseVal.intersect(*getValueFromCondition(SI->getFalseValue(), Cond,
882                                                   /*IsTrueDest*/ false,
883                                                   /*UseBlockValue*/ false));
884   }
885 
886   ValueLatticeElement Result = TrueVal;
887   Result.mergeIn(FalseVal);
888   return Result;
889 }
890 
891 std::optional<ConstantRange>
892 LazyValueInfoImpl::getRangeFor(Value *V, Instruction *CxtI, BasicBlock *BB) {
893   std::optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
894   if (!OptVal)
895     return std::nullopt;
896   return OptVal->asConstantRange(V->getType());
897 }
898 
899 std::optional<ValueLatticeElement>
900 LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
901   // Filter out casts we don't know how to reason about before attempting to
902   // recurse on our operand.  This can cut a long search short if we know we're
903   // not going to be able to get any useful information anways.
904   switch (CI->getOpcode()) {
905   case Instruction::Trunc:
906   case Instruction::SExt:
907   case Instruction::ZExt:
908     break;
909   default:
910     // Unhandled instructions are overdefined.
911     LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
912                       << "' - overdefined (unknown cast).\n");
913     return ValueLatticeElement::getOverdefined();
914   }
915 
916   // Figure out the range of the LHS.  If that fails, we still apply the
917   // transfer rule on the full set since we may be able to locally infer
918   // interesting facts.
919   std::optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
920   if (!LHSRes)
921     // More work to do before applying this transfer rule.
922     return std::nullopt;
923   const ConstantRange &LHSRange = *LHSRes;
924 
925   const unsigned ResultBitWidth = CI->getType()->getScalarSizeInBits();
926 
927   // NOTE: We're currently limited by the set of operations that ConstantRange
928   // can evaluate symbolically.  Enhancing that set will allows us to analyze
929   // more definitions.
930   return ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(),
931                                                        ResultBitWidth));
932 }
933 
std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
    Instruction *I, BasicBlock *BB,
    std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
        OpFn) {
  Value *LHS = I->getOperand(0);
  Value *RHS = I->getOperand(1);

  // If one operand (Y) is a select of two constants, evaluate the binop once
  // per select arm -- with the other operand X's range refined under the
  // corresponding assumption about the select condition -- and return the
  // union of the two results.  XIsLHS records which side X sits on so the
  // operands are passed to OpFn in the right order.
  auto ThreadBinOpOverSelect =
      [&](Value *X, const ConstantRange &CRX, SelectInst *Y,
          bool XIsLHS) -> std::optional<ValueLatticeElement> {
    Value *Cond = Y->getCondition();
    // Only handle selects with constant values.
    Constant *TrueC = dyn_cast<Constant>(Y->getTrueValue());
    if (!TrueC)
      return std::nullopt;
    Constant *FalseC = dyn_cast<Constant>(Y->getFalseValue());
    if (!FalseC)
      return std::nullopt;
    // An undef condition could pick either arm, which would make the
    // per-arm reasoning below unsound.
    if (!isGuaranteedNotToBeUndef(Cond, AC))
      return std::nullopt;

    // Narrow X's range under each assumption about the condition.
    ConstantRange TrueX =
        CRX.intersectWith(getValueFromCondition(X, Cond, /*CondIsTrue=*/true,
                                                /*UseBlockValue=*/false)
                              ->asConstantRange(X->getType()));
    ConstantRange FalseX =
        CRX.intersectWith(getValueFromCondition(X, Cond, /*CondIsTrue=*/false,
                                                /*UseBlockValue=*/false)
                              ->asConstantRange(X->getType()));
    ConstantRange TrueY = TrueC->toConstantRange();
    ConstantRange FalseY = FalseC->toConstantRange();

    // Apply OpFn per arm (in proper operand order) and union the results.
    if (XIsLHS)
      return ValueLatticeElement::getRange(
          OpFn(TrueX, TrueY).unionWith(OpFn(FalseX, FalseY)));
    return ValueLatticeElement::getRange(
        OpFn(TrueY, TrueX).unionWith(OpFn(FalseY, FalseX)));
  };

  // Figure out the ranges of the operands.  If that fails, use a
  // conservative range, but apply the transfer rule anyways.  This
  // lets us pick up facts from expressions like "and i32 (call i32
  // @foo()), 32"
  std::optional<ConstantRange> LHSRes = getRangeFor(LHS, I, BB);
  if (!LHSRes)
    return std::nullopt;

  // Try to thread binop over rhs select
  if (auto *SI = dyn_cast<SelectInst>(RHS)) {
    if (auto Res = ThreadBinOpOverSelect(LHS, *LHSRes, SI, /*XIsLHS=*/true))
      return *Res;
  }

  std::optional<ConstantRange> RHSRes = getRangeFor(RHS, I, BB);
  if (!RHSRes)
    return std::nullopt;

  // Try to thread binop over lhs select
  if (auto *SI = dyn_cast<SelectInst>(LHS)) {
    if (auto Res = ThreadBinOpOverSelect(RHS, *RHSRes, SI, /*XIsLHS=*/false))
      return *Res;
  }

  // Plain case: apply the transfer function to the two operand ranges.
  const ConstantRange &LHSRange = *LHSRes;
  const ConstantRange &RHSRange = *RHSRes;
  return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
}
1002 
1003 std::optional<ValueLatticeElement>
1004 LazyValueInfoImpl::solveBlockValueBinaryOp(BinaryOperator *BO, BasicBlock *BB) {
1005   assert(BO->getOperand(0)->getType()->isSized() &&
1006          "all operands to binary operators are sized");
1007   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
1008     unsigned NoWrapKind = OBO->getNoWrapKind();
1009     return solveBlockValueBinaryOpImpl(
1010         BO, BB,
1011         [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
1012           return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind);
1013         });
1014   }
1015 
1016   return solveBlockValueBinaryOpImpl(
1017       BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
1018         return CR1.binaryOp(BO->getOpcode(), CR2);
1019       });
1020 }
1021 
1022 std::optional<ValueLatticeElement>
1023 LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
1024                                                     BasicBlock *BB) {
1025   return solveBlockValueBinaryOpImpl(
1026       WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
1027         return CR1.binaryOp(WO->getBinaryOp(), CR2);
1028       });
1029 }
1030 
1031 std::optional<ValueLatticeElement>
1032 LazyValueInfoImpl::solveBlockValueIntrinsic(IntrinsicInst *II, BasicBlock *BB) {
1033   ValueLatticeElement MetadataVal = getFromRangeMetadata(II);
1034   if (!ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
1035     LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1036                       << "' - unknown intrinsic.\n");
1037     return MetadataVal;
1038   }
1039 
1040   SmallVector<ConstantRange, 2> OpRanges;
1041   for (Value *Op : II->args()) {
1042     std::optional<ConstantRange> Range = getRangeFor(Op, II, BB);
1043     if (!Range)
1044       return std::nullopt;
1045     OpRanges.push_back(*Range);
1046   }
1047 
1048   return ValueLatticeElement::getRange(
1049              ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges))
1050       .intersect(MetadataVal);
1051 }
1052 
std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueInsertElement(InsertElementInst *IEI,
                                                BasicBlock *BB) {
  // Lattice value of the scalar being inserted (operand 1).
  std::optional<ValueLatticeElement> OptEltVal =
      getBlockValue(IEI->getOperand(1), BB, IEI);
  if (!OptEltVal)
    return std::nullopt;
  ValueLatticeElement &Res = *OptEltVal;

  // Lattice value of the vector being inserted into (operand 0).
  std::optional<ValueLatticeElement> OptVecVal =
      getBlockValue(IEI->getOperand(0), BB, IEI);
  if (!OptVecVal)
    return std::nullopt;

  // Bail out if the inserted element is a constant expression. Unlike other
  // ValueLattice types, these are not considered an implicit splat when a
  // vector type is used.
  // We could call ConstantFoldInsertElementInstruction here to handle these.
  if (OptEltVal->isConstant())
    return ValueLatticeElement::getOverdefined();

  // The result may hold either the inserted element or any element of the
  // original vector, so merge the two lattice values.
  Res.mergeIn(*OptVecVal);
  return Res;
}
1077 
1078 std::optional<ValueLatticeElement>
1079 LazyValueInfoImpl::solveBlockValueExtractValue(ExtractValueInst *EVI,
1080                                                BasicBlock *BB) {
1081   if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1082     if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1083       return solveBlockValueOverflowIntrinsic(WO, BB);
1084 
1085   // Handle extractvalue of insertvalue to allow further simplification
1086   // based on replaced with.overflow intrinsics.
1087   if (Value *V = simplifyExtractValueInst(
1088           EVI->getAggregateOperand(), EVI->getIndices(),
1089           EVI->getDataLayout()))
1090     return getBlockValue(V, BB, EVI);
1091 
1092   LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1093                     << "' - overdefined (unknown extractvalue).\n");
1094   return ValueLatticeElement::getOverdefined();
1095 }
1096 
1097 static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
1098                              ICmpInst::Predicate Pred) {
1099   if (LHS == Val)
1100     return true;
1101 
1102   // Handle range checking idiom produced by InstCombine. We will subtract the
1103   // offset from the allowed range for RHS in this case.
1104   const APInt *C;
1105   if (match(LHS, m_AddLike(m_Specific(Val), m_APInt(C)))) {
1106     Offset = *C;
1107     return true;
1108   }
1109 
1110   // Handle the symmetric case. This appears in saturation patterns like
1111   // (x == 16) ? 16 : (x + 1).
1112   if (match(Val, m_AddLike(m_Specific(LHS), m_APInt(C)))) {
1113     Offset = -*C;
1114     return true;
1115   }
1116 
1117   // If (x | y) < C, then (x < C) && (y < C).
1118   if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
1119       (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1120     return true;
1121 
1122   // If (x & y) > C, then (x > C) && (y > C).
1123   if (match(LHS, m_c_And(m_Specific(Val), m_Value())) &&
1124       (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
1125     return true;
1126 
1127   return false;
1128 }
1129 
1130 /// Get value range for a "(Val + Offset) Pred RHS" condition.
1131 std::optional<ValueLatticeElement>
1132 LazyValueInfoImpl::getValueFromSimpleICmpCondition(CmpInst::Predicate Pred,
1133                                                    Value *RHS,
1134                                                    const APInt &Offset,
1135                                                    Instruction *CxtI,
1136                                                    bool UseBlockValue) {
1137   ConstantRange RHSRange(RHS->getType()->getScalarSizeInBits(),
1138                          /*isFullSet=*/true);
1139   if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
1140     RHSRange = ConstantRange(CI->getValue());
1141   } else if (UseBlockValue) {
1142     std::optional<ValueLatticeElement> R =
1143         getBlockValue(RHS, CxtI->getParent(), CxtI);
1144     if (!R)
1145       return std::nullopt;
1146     RHSRange = R->asConstantRange(RHS->getType());
1147   }
1148 
1149   ConstantRange TrueValues =
1150       ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
1151   return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
1152 }
1153 
1154 static std::optional<ConstantRange>
1155 getRangeViaSLT(CmpInst::Predicate Pred, APInt RHS,
1156                function_ref<std::optional<ConstantRange>(const APInt &)> Fn) {
1157   bool Invert = false;
1158   if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
1159     Pred = ICmpInst::getInversePredicate(Pred);
1160     Invert = true;
1161   }
1162   if (Pred == ICmpInst::ICMP_SLE) {
1163     Pred = ICmpInst::ICMP_SLT;
1164     if (RHS.isMaxSignedValue())
1165       return std::nullopt; // Could also return full/empty here, if we wanted.
1166     ++RHS;
1167   }
1168   assert(Pred == ICmpInst::ICMP_SLT && "Must be signed predicate");
1169   if (auto CR = Fn(RHS))
1170     return Invert ? CR->inverse() : CR;
1171   return std::nullopt;
1172 }
1173 
/// Get value range for a "ctpop(Val) Pred RHS" condition.
static ValueLatticeElement getValueFromICmpCtpop(ICmpInst::Predicate Pred,
                                                 Value *RHS) {
  unsigned BitWidth = RHS->getType()->getScalarSizeInBits();

  // Only constant popcount bounds are handled.
  auto *RHSConst = dyn_cast<ConstantInt>(RHS);
  if (!RHSConst)
    return ValueLatticeElement::getOverdefined();

  // The set of popcount results that satisfy the predicate.
  ConstantRange ResValRange =
      ConstantRange::makeExactICmpRegion(Pred, RHSConst->getValue());

  // Popcount of an N-bit value is at most N; clamp the bounds accordingly.
  unsigned ResMin = ResValRange.getUnsignedMin().getLimitedValue(BitWidth);
  unsigned ResMax = ResValRange.getUnsignedMax().getLimitedValue(BitWidth);

  // With at least ResMin bits set, Val is at least the value with the ResMin
  // low bits set; with at most ResMax bits set, Val is at most the value
  // with the ResMax high bits set.
  APInt ValMin = APInt::getLowBitsSet(BitWidth, ResMin);
  APInt ValMax = APInt::getHighBitsSet(BitWidth, ResMax);
  return ValueLatticeElement::getRange(
      ConstantRange::getNonEmpty(std::move(ValMin), ValMax + 1));
}
1194 
std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
    Value *Val, ICmpInst *ICI, bool isTrueDest, bool UseBlockValue) {
  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  // Get the predicate that must hold along the considered edge.
  CmpInst::Predicate EdgePred =
      isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();

  // Val ==/!= constant directly pins or excludes that constant.  Equality
  // with undef tells us nothing, so no "not undef" fact is recorded.
  if (isa<Constant>(RHS)) {
    if (ICI->isEquality() && LHS == Val) {
      if (EdgePred == ICmpInst::ICMP_EQ)
        return ValueLatticeElement::get(cast<Constant>(RHS));
      else if (!isa<UndefValue>(RHS))
        return ValueLatticeElement::getNot(cast<Constant>(RHS));
    }
  }

  // The range-based reasoning below only applies to scalar integers.
  Type *Ty = Val->getType();
  if (!Ty->isIntegerTy())
    return ValueLatticeElement::getOverdefined();

  // Try to view the condition as "(Val + Offset) EdgePred RHS", first as
  // written, then with the operands (and predicate) swapped.
  unsigned BitWidth = Ty->getScalarSizeInBits();
  APInt Offset(BitWidth, 0);
  if (matchICmpOperand(Offset, LHS, Val, EdgePred))
    return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset, ICI,
                                           UseBlockValue);

  CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(EdgePred);
  if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
    return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset, ICI,
                                           UseBlockValue);

  // ctpop(Val) Pred RHS bounds how many bits of Val can be set.
  if (match(LHS, m_Intrinsic<Intrinsic::ctpop>(m_Specific(Val))))
    return getValueFromICmpCtpop(EdgePred, RHS);

  const APInt *Mask, *C;
  if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
      match(RHS, m_APInt(C))) {
    // If (Val & Mask) == C then all the masked bits are known and we can
    // compute a value range based on that.
    if (EdgePred == ICmpInst::ICMP_EQ) {
      KnownBits Known;
      Known.Zero = ~*C & *Mask;
      Known.One = *C & *Mask;
      return ValueLatticeElement::getRange(
          ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
    }

    // (Val & Mask) != C excludes every value whose masked bits equal C.
    if (EdgePred == ICmpInst::ICMP_NE)
      return ValueLatticeElement::getRange(
          ConstantRange::makeMaskNotEqualRange(*Mask, *C));
  }

  // If (X urem Modulus) >= C, then X >= C.
  // If trunc X >= C, then X >= C.
  // TODO: An upper bound could be computed as well.
  if (match(LHS, m_CombineOr(m_URem(m_Specific(Val), m_Value()),
                             m_Trunc(m_Specific(Val)))) &&
      match(RHS, m_APInt(C))) {
    // Use the icmp region so we don't have to deal with different predicates.
    ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
    if (!CR.isEmptySet())
      return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
          CR.getUnsignedMin().zext(BitWidth), APInt(BitWidth, 0)));
  }

  // Recognize:
  // icmp slt (ashr X, ShAmtC), C --> icmp slt X, C << ShAmtC
  // Preconditions: (C << ShAmtC) >> ShAmtC == C
  const APInt *ShAmtC;
  if (CmpInst::isSigned(EdgePred) &&
      match(LHS, m_AShr(m_Specific(Val), m_APInt(ShAmtC))) &&
      match(RHS, m_APInt(C))) {
    auto CR = getRangeViaSLT(
        EdgePred, *C, [&](const APInt &RHS) -> std::optional<ConstantRange> {
          APInt New = RHS << *ShAmtC;
          // Bail if shifting the bound out and back loses bits (the
          // precondition noted above does not hold).
          if ((New.ashr(*ShAmtC)) != RHS)
            return std::nullopt;
          return ConstantRange::getNonEmpty(
              APInt::getSignedMinValue(New.getBitWidth()), New);
        });
    if (CR)
      return ValueLatticeElement::getRange(*CR);
  }

  // a - b or ptrtoint(a) - ptrtoint(b) ==/!= 0 if a ==/!= b
  Value *X, *Y;
  if (ICI->isEquality() && match(Val, m_Sub(m_Value(X), m_Value(Y)))) {
    // Peek through ptrtoints
    match(X, m_PtrToIntSameSize(DL, m_Value(X)));
    match(Y, m_PtrToIntSameSize(DL, m_Value(Y)));
    if ((X == LHS && Y == RHS) || (X == RHS && Y == LHS)) {
      Constant *NullVal = Constant::getNullValue(Val->getType());
      if (EdgePred == ICmpInst::ICMP_EQ)
        return ValueLatticeElement::get(NullVal);
      return ValueLatticeElement::getNot(NullVal);
    }
  }

  return ValueLatticeElement::getOverdefined();
}
1297 
1298 ValueLatticeElement LazyValueInfoImpl::getValueFromTrunc(Value *Val,
1299                                                          TruncInst *Trunc,
1300                                                          bool IsTrueDest) {
1301   assert(Trunc->getType()->isIntOrIntVectorTy(1));
1302 
1303   if (Trunc->getOperand(0) != Val)
1304     return ValueLatticeElement::getOverdefined();
1305 
1306   Type *Ty = Val->getType();
1307 
1308   if (Trunc->hasNoUnsignedWrap()) {
1309     if (IsTrueDest)
1310       return ValueLatticeElement::get(ConstantInt::get(Ty, 1));
1311     return ValueLatticeElement::get(Constant::getNullValue(Ty));
1312   }
1313 
1314   if (IsTrueDest)
1315     return ValueLatticeElement::getNot(Constant::getNullValue(Ty));
1316   return ValueLatticeElement::getNot(Constant::getAllOnesValue(Ty));
1317 }
1318 
1319 // Handle conditions of the form
1320 // extractvalue(op.with.overflow(%x, C), 1).
1321 static ValueLatticeElement getValueFromOverflowCondition(
1322     Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1323   // TODO: This only works with a constant RHS for now. We could also compute
1324   // the range of the RHS, but this doesn't fit into the current structure of
1325   // the edge value calculation.
1326   const APInt *C;
1327   if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
1328     return ValueLatticeElement::getOverdefined();
1329 
1330   // Calculate the possible values of %x for which no overflow occurs.
1331   ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
1332       WO->getBinaryOp(), *C, WO->getNoWrapKind());
1333 
1334   // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1335   // constrained to it's inverse (all values that might cause overflow).
1336   if (IsTrueDest)
1337     NWR = NWR.inverse();
1338   return ValueLatticeElement::getRange(NWR);
1339 }
1340 
std::optional<ValueLatticeElement>
LazyValueInfoImpl::getValueFromCondition(Value *Val, Value *Cond,
                                         bool IsTrueDest, bool UseBlockValue,
                                         unsigned Depth) {
  // Direct handlers for the condition kinds we understand.
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
    return getValueFromICmpCondition(Val, ICI, IsTrueDest, UseBlockValue);

  if (auto *Trunc = dyn_cast<TruncInst>(Cond))
    return getValueFromTrunc(Val, Trunc, IsTrueDest);

  // extractvalue(op.with.overflow(..), 1) is the overflow bit.
  if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
    if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
      if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
        return getValueFromOverflowCondition(Val, WO, IsTrueDest);

  // Bound the recursion through not/and/or chains below.
  if (++Depth == MaxAnalysisRecursionDepth)
    return ValueLatticeElement::getOverdefined();

  // !N holding on this edge is the same as N holding with the destination
  // flipped.
  Value *N;
  if (match(Cond, m_Not(m_Value(N))))
    return getValueFromCondition(Val, N, !IsTrueDest, UseBlockValue, Depth);

  Value *L, *R;
  bool IsAnd;
  if (match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))))
    IsAnd = true;
  else if (match(Cond, m_LogicalOr(m_Value(L), m_Value(R))))
    IsAnd = false;
  else
    return ValueLatticeElement::getOverdefined();

  // Recurse into both sides; bail to the solver if either is unresolved.
  std::optional<ValueLatticeElement> LV =
      getValueFromCondition(Val, L, IsTrueDest, UseBlockValue, Depth);
  if (!LV)
    return std::nullopt;
  std::optional<ValueLatticeElement> RV =
      getValueFromCondition(Val, R, IsTrueDest, UseBlockValue, Depth);
  if (!RV)
    return std::nullopt;

  // if (L && R) -> intersect L and R
  // if (!(L || R)) -> intersect !L and !R
  // if (L || R) -> union L and R
  // if (!(L && R)) -> union !L and !R
  if (IsTrueDest ^ IsAnd) {
    LV->mergeIn(*RV);
    return *LV;
  }

  return LV->intersect(*RV);
}
1392 
1393 // Return true if Usr has Op as an operand, otherwise false.
1394 static bool usesOperand(User *Usr, Value *Op) {
1395   return is_contained(Usr->operands(), Op);
1396 }
1397 
1398 // Return true if the instruction type of Val is supported by
1399 // constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
1400 // Call this before calling constantFoldUser() to find out if it's even worth
1401 // attempting to call it.
1402 static bool isOperationFoldable(User *Usr) {
1403   return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
1404 }
1405 
// Check if Usr can be simplified to an integer constant when the value of one
// of its operands Op is an integer constant OpConstVal. If so, return it as an
// lattice value range with a single element or otherwise return an overdefined
// lattice value.
static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
                                            const APInt &OpConstVal,
                                            const DataLayout &DL) {
  assert(isOperationFoldable(Usr) && "Precondition");
  // Materialize the known operand value as a constant of Op's type.
  Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
  // Check if Usr can be simplified to a constant.
  if (auto *CI = dyn_cast<CastInst>(Usr)) {
    assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
    if (auto *C = dyn_cast_or_null<ConstantInt>(
            simplifyCastInst(CI->getOpcode(), OpConst,
                             CI->getDestTy(), DL))) {
      return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
    }
  } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
    // Substitute OpConst for whichever operand(s) equal Op, then simplify.
    bool Op0Match = BO->getOperand(0) == Op;
    bool Op1Match = BO->getOperand(1) == Op;
    assert((Op0Match || Op1Match) &&
           "Operand 0 nor Operand 1 isn't a match");
    Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
    Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
    if (auto *C = dyn_cast_or_null<ConstantInt>(
            simplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
      return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
    }
  } else if (isa<FreezeInst>(Usr)) {
    // freeze of a known constant integer is that integer.
    assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
    return ValueLatticeElement::getRange(ConstantRange(OpConstVal));
  }
  return ValueLatticeElement::getOverdefined();
}
1440 
/// Compute the value of Val on the edge BBFrom -> BBTo.
///
/// Only the terminator of BBFrom is inspected (conditional branches and
/// switches); anything else yields overdefined. May return std::nullopt when
/// UseBlockValue is true and a required block value has not been computed yet
/// (the caller is expected to solve() and retry).
std::optional<ValueLatticeElement>
LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
                                     BasicBlock *BBTo, bool UseBlockValue) {
  // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
  // know that v != 0.
  if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
    // If this is a conditional branch and only one successor goes to BBTo, then
    // we may be able to infer something from the condition.
    if (BI->isConditional() &&
        BI->getSuccessor(0) != BI->getSuccessor(1)) {
      // isTrueDest: the edge BBFrom->BBTo is the one taken when the branch
      // condition evaluates to true.
      bool isTrueDest = BI->getSuccessor(0) == BBTo;
      assert(BI->getSuccessor(!isTrueDest) == BBTo &&
             "BBTo isn't a successor of BBFrom");
      Value *Condition = BI->getCondition();

      // If V is the condition of the branch itself, then we know exactly what
      // it is.
      // NB: The condition on a `br` can't be a vector type.
      if (Condition == Val)
        return ValueLatticeElement::get(ConstantInt::get(
                              Type::getInt1Ty(Val->getContext()), isTrueDest));

      // If the condition of the branch is an equality comparison, we may be
      // able to infer the value.
      std::optional<ValueLatticeElement> Result =
          getValueFromCondition(Val, Condition, isTrueDest, UseBlockValue);
      if (!Result)
        return std::nullopt;

      if (!Result->isOverdefined())
        return Result;

      // The condition did not directly constrain Val; try to constant-fold
      // Val itself using what the taken edge implies about its operands.
      if (User *Usr = dyn_cast<User>(Val)) {
        assert(Result->isOverdefined() && "Result isn't overdefined");
        // Check with isOperationFoldable() first to avoid linearly iterating
        // over the operands unnecessarily which can be expensive for
        // instructions with many operands.
        if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
          const DataLayout &DL = BBTo->getDataLayout();
          if (usesOperand(Usr, Condition)) {
            // If Val has Condition as an operand and Val can be folded into a
            // constant with either Condition == true or Condition == false,
            // propagate the constant.
            // eg.
            //   ; %Val is true on the edge to %then.
            //   %Val = and i1 %Condition, true.
            //   br %Condition, label %then, label %else
            APInt ConditionVal(1, isTrueDest ? 1 : 0);
            Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
          } else {
            // If one of Val's operand has an inferred value, we may be able to
            // infer the value of Val.
            // eg.
            //    ; %Val is 94 on the edge to %then.
            //    %Val = add i8 %Op, 1
            //    %Condition = icmp eq i8 %Op, 93
            //    br i1 %Condition, label %then, label %else
            for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
              Value *Op = Usr->getOperand(i);
              // UseBlockValue is deliberately false here, so this call cannot
              // return std::nullopt and the dereference is safe.
              ValueLatticeElement OpLatticeVal = *getValueFromCondition(
                  Op, Condition, isTrueDest, /*UseBlockValue*/ false);
              if (std::optional<APInt> OpConst =
                      OpLatticeVal.asConstantInteger()) {
                Result = constantFoldUser(Usr, Op, *OpConst, DL);
                break;
              }
            }
          }
        }
      }
      if (!Result->isOverdefined())
        return Result;
    }
  }

  // If the edge was formed by a switch on the value, then we may know exactly
  // what it is.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
    Value *Condition = SI->getCondition();
    if (!isa<IntegerType>(Val->getType()))
      return ValueLatticeElement::getOverdefined();
    bool ValUsesConditionAndMayBeFoldable = false;
    if (Condition != Val) {
      // Check if Val has Condition as an operand.
      if (User *Usr = dyn_cast<User>(Val))
        ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
            usesOperand(Usr, Condition);
      if (!ValUsesConditionAndMayBeFoldable)
        return ValueLatticeElement::getOverdefined();
    }
    assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
           "Condition != Val nor Val doesn't use Condition");

    bool DefaultCase = SI->getDefaultDest() == BBTo;
    unsigned BitWidth = Val->getType()->getIntegerBitWidth();
    // For the default destination start from the full set and subtract case
    // values below; for a case destination start empty and union in the
    // values of the cases that branch to BBTo.
    ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);

    for (auto Case : SI->cases()) {
      APInt CaseValue = Case.getCaseValue()->getValue();
      ConstantRange EdgeVal(CaseValue);
      if (ValUsesConditionAndMayBeFoldable) {
        User *Usr = cast<User>(Val);
        const DataLayout &DL = BBTo->getDataLayout();
        ValueLatticeElement EdgeLatticeVal =
            constantFoldUser(Usr, Condition, CaseValue, DL);
        if (EdgeLatticeVal.isOverdefined())
          return ValueLatticeElement::getOverdefined();
        EdgeVal = EdgeLatticeVal.getConstantRange();
      }
      if (DefaultCase) {
        // It is possible that the default destination is the destination of
        // some cases. We cannot perform difference for those cases.
        // We know Condition != CaseValue in BBTo.  In some cases we can use
        // this to infer Val == f(Condition) is != f(CaseValue).  For now, we
        // only do this when f is identity (i.e. Val == Condition), but we
        // should be able to do this for any injective f.
        if (Case.getCaseSuccessor() != BBTo && Condition == Val)
          EdgesVals = EdgesVals.difference(EdgeVal);
      } else if (Case.getCaseSuccessor() == BBTo)
        EdgesVals = EdgesVals.unionWith(EdgeVal);
    }
    return ValueLatticeElement::getRange(std::move(EdgesVals));
  }
  return ValueLatticeElement::getOverdefined();
}
1567 
1568 /// Compute the value of Val on the edge BBFrom -> BBTo or the value at
1569 /// the basic block if the edge does not constrain Val.
1570 std::optional<ValueLatticeElement>
1571 LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
1572                                 BasicBlock *BBTo, Instruction *CxtI) {
1573   // If already a constant, there is nothing to compute.
1574   if (Constant *VC = dyn_cast<Constant>(Val))
1575     return ValueLatticeElement::get(VC);
1576 
1577   std::optional<ValueLatticeElement> LocalResult =
1578       getEdgeValueLocal(Val, BBFrom, BBTo, /*UseBlockValue*/ true);
1579   if (!LocalResult)
1580     return std::nullopt;
1581 
1582   if (hasSingleValue(*LocalResult))
1583     // Can't get any more precise here
1584     return LocalResult;
1585 
1586   std::optional<ValueLatticeElement> OptInBlock =
1587       getBlockValue(Val, BBFrom, BBFrom->getTerminator());
1588   if (!OptInBlock)
1589     return std::nullopt;
1590   ValueLatticeElement &InBlock = *OptInBlock;
1591 
1592   // We can use the context instruction (generically the ultimate instruction
1593   // the calling pass is trying to simplify) here, even though the result of
1594   // this function is generally cached when called from the solve* functions
1595   // (and that cached result might be used with queries using a different
1596   // context instruction), because when this function is called from the solve*
1597   // functions, the context instruction is not provided. When called from
1598   // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1599   // but then the result is not cached.
1600   intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);
1601 
1602   return LocalResult->intersect(InBlock);
1603 }
1604 
1605 ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1606                                                        Instruction *CxtI) {
1607   LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1608                     << BB->getName() << "'\n");
1609 
1610   assert(BlockValueStack.empty() && BlockValueSet.empty());
1611   std::optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
1612   if (!OptResult) {
1613     solve();
1614     OptResult = getBlockValue(V, BB, CxtI);
1615     assert(OptResult && "Value not available after solving");
1616   }
1617 
1618   ValueLatticeElement Result = *OptResult;
1619   LLVM_DEBUG(dbgs() << "  Result = " << Result << "\n");
1620   return Result;
1621 }
1622 
1623 ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1624   LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1625                     << "'\n");
1626 
1627   if (auto *C = dyn_cast<Constant>(V))
1628     return ValueLatticeElement::get(C);
1629 
1630   ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
1631   if (auto *I = dyn_cast<Instruction>(V))
1632     Result = getFromRangeMetadata(I);
1633   intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1634 
1635   LLVM_DEBUG(dbgs() << "  Result = " << Result << "\n");
1636   return Result;
1637 }
1638 
1639 ValueLatticeElement LazyValueInfoImpl::
1640 getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1641                Instruction *CxtI) {
1642   LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1643                     << FromBB->getName() << "' to '" << ToBB->getName()
1644                     << "'\n");
1645 
1646   std::optional<ValueLatticeElement> Result =
1647       getEdgeValue(V, FromBB, ToBB, CxtI);
1648   while (!Result) {
1649     // As the worklist only explicitly tracks block values (but not edge values)
1650     // we may have to call solve() multiple times, as the edge value calculation
1651     // may request additional block values.
1652     solve();
1653     Result = getEdgeValue(V, FromBB, ToBB, CxtI);
1654   }
1655 
1656   LLVM_DEBUG(dbgs() << "  Result = " << *Result << "\n");
1657   return *Result;
1658 }
1659 
/// Compute the lattice value of the used value at the use U, additionally
/// intersecting any select/branch conditions that dominate the (transitive
/// single) use chain starting at U.
ValueLatticeElement LazyValueInfoImpl::getValueAtUse(const Use &U) {
  Value *V = U.get();
  auto *CxtI = cast<Instruction>(U.getUser());
  // Baseline: the block-level value of V at the user.
  ValueLatticeElement VL = getValueInBlock(V, CxtI->getParent(), CxtI);

  // Check whether the only (possibly transitive) use of the value is in a
  // position where V can be constrained by a select or branch condition.
  const Use *CurrU = &U;
  // TODO: Increase limit?
  const unsigned MaxUsesToInspect = 3;
  for (unsigned I = 0; I < MaxUsesToInspect; ++I) {
    std::optional<ValueLatticeElement> CondVal;
    auto *CurrI = cast<Instruction>(CurrU->getUser());
    if (auto *SI = dyn_cast<SelectInst>(CurrI)) {
      // If the value is undef, a different value may be chosen in
      // the select condition and at use.
      if (!isGuaranteedNotToBeUndef(SI->getCondition(), AC))
        break;
      // Operand 1 is selected when the condition is true, operand 2 when it
      // is false; intersect the matching implication.
      if (CurrU->getOperandNo() == 1)
        CondVal =
            *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ true,
                                   /*UseBlockValue*/ false);
      else if (CurrU->getOperandNo() == 2)
        CondVal =
            *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ false,
                                   /*UseBlockValue*/ false);
    } else if (auto *PHI = dyn_cast<PHINode>(CurrI)) {
      // TODO: Use non-local query?
      CondVal = *getEdgeValueLocal(V, PHI->getIncomingBlock(*CurrU),
                                   PHI->getParent(), /*UseBlockValue*/ false);
    }
    if (CondVal)
      VL = VL.intersect(*CondVal);

    // Only follow one-use chain, to allow direct intersection of conditions.
    // If there are multiple uses, we would have to intersect with the union of
    // all conditions at different uses.
    // Stop walking if we hit a non-speculatable instruction. Even if the
    // result is only used under a specific condition, executing the
    // instruction itself may cause side effects or UB already.
    // This also disallows looking through phi nodes: If the phi node is part
    // of a cycle, we might end up reasoning about values from different cycle
    // iterations (PR60629).
    if (!CurrI->hasOneUse() ||
        !isSafeToSpeculativelyExecuteWithVariableReplaced(
            CurrI, /*IgnoreUBImplyingAttrs=*/false))
      break;
    CurrU = &*CurrI->use_begin();
  }
  return VL;
}
1711 
/// Inform the cache that the edge PredBB -> OldSucc has been threaded to
/// NewSucc. PredBB is unused by the cache update, which only needs to know
/// which successor's cached values now also flow into NewSucc.
void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                                   BasicBlock *NewSucc) {
  TheCache.threadEdgeImpl(OldSucc, NewSucc);
}
1716 
1717 //===----------------------------------------------------------------------===//
1718 //                            LazyValueInfo Impl
1719 //===----------------------------------------------------------------------===//
1720 
1721 bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1722   Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1723 
1724   if (auto *Impl = Info.getImpl())
1725     Impl->clear();
1726 
1727   // Fully lazy.
1728   return false;
1729 }
1730 
/// LVI modifies nothing; it depends on assumption and target-library info.
void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}
1736 
/// Return the (lazily populated) LazyValueInfo owned by this pass.
LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1738 
1739 /// This lazily constructs the LazyValueInfoImpl.
1740 LazyValueInfoImpl &LazyValueInfo::getOrCreateImpl(const Module *M) {
1741   if (!PImpl) {
1742     assert(M && "getCache() called with a null Module");
1743     const DataLayout &DL = M->getDataLayout();
1744     Function *GuardDecl =
1745         Intrinsic::getDeclarationIfExists(M, Intrinsic::experimental_guard);
1746     PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
1747   }
1748   return *PImpl;
1749 }
1750 
/// Return the implementation if it has been created, or null otherwise.
LazyValueInfoImpl *LazyValueInfo::getImpl() { return PImpl; }
1752 
// The destructor frees the lazily allocated implementation, if any.
LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1754 
1755 void LazyValueInfo::releaseMemory() {
1756   // If the cache was allocated, free it.
1757   if (auto *Impl = getImpl()) {
1758     delete &*Impl;
1759     PImpl = nullptr;
1760   }
1761 }
1762 
1763 bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1764                                FunctionAnalysisManager::Invalidator &Inv) {
1765   // We need to invalidate if we have either failed to preserve this analyses
1766   // result directly or if any of its dependencies have been invalidated.
1767   auto PAC = PA.getChecker<LazyValueAnalysis>();
1768   if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
1769     return true;
1770 
1771   return false;
1772 }
1773 
// Forward releaseMemory to the wrapped LazyValueInfo.
void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1775 
1776 LazyValueInfo LazyValueAnalysis::run(Function &F,
1777                                      FunctionAnalysisManager &FAM) {
1778   auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1779 
1780   return LazyValueInfo(&AC, &F.getDataLayout());
1781 }
1782 
1783 /// Returns true if we can statically tell that this value will never be a
1784 /// "useful" constant.  In practice, this means we've got something like an
1785 /// alloca or a malloc call for which a comparison against a constant can
1786 /// only be guarding dead code.  Note that we are potentially giving up some
1787 /// precision in dead code (a constant result) in favour of avoiding a
1788 /// expensive search for a easily answered common query.
1789 static bool isKnownNonConstant(Value *V) {
1790   V = V->stripPointerCasts();
1791   // The return val of alloc cannot be a Constant.
1792   if (isa<AllocaInst>(V))
1793     return true;
1794   return false;
1795 }
1796 
1797 Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
1798   // Bail out early if V is known not to be a Constant.
1799   if (isKnownNonConstant(V))
1800     return nullptr;
1801 
1802   BasicBlock *BB = CxtI->getParent();
1803   ValueLatticeElement Result =
1804       getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1805 
1806   if (Result.isConstant())
1807     return Result.getConstant();
1808   if (Result.isConstantRange()) {
1809     const ConstantRange &CR = Result.getConstantRange();
1810     if (const APInt *SingleVal = CR.getSingleElement())
1811       return ConstantInt::get(V->getType(), *SingleVal);
1812   }
1813   return nullptr;
1814 }
1815 
1816 ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
1817                                               bool UndefAllowed) {
1818   BasicBlock *BB = CxtI->getParent();
1819   ValueLatticeElement Result =
1820       getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1821   return Result.asConstantRange(V->getType(), UndefAllowed);
1822 }
1823 
1824 ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
1825                                                    bool UndefAllowed) {
1826   auto *Inst = cast<Instruction>(U.getUser());
1827   ValueLatticeElement Result =
1828       getOrCreateImpl(Inst->getModule()).getValueAtUse(U);
1829   return Result.asConstantRange(U->getType(), UndefAllowed);
1830 }
1831 
1832 /// Determine whether the specified value is known to be a
1833 /// constant on the specified edge. Return null if not.
1834 Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1835                                            BasicBlock *ToBB,
1836                                            Instruction *CxtI) {
1837   Module *M = FromBB->getModule();
1838   ValueLatticeElement Result =
1839       getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1840 
1841   if (Result.isConstant())
1842     return Result.getConstant();
1843   if (Result.isConstantRange()) {
1844     const ConstantRange &CR = Result.getConstantRange();
1845     if (const APInt *SingleVal = CR.getSingleElement())
1846       return ConstantInt::get(V->getType(), *SingleVal);
1847   }
1848   return nullptr;
1849 }
1850 
1851 ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
1852                                                     BasicBlock *FromBB,
1853                                                     BasicBlock *ToBB,
1854                                                     Instruction *CxtI) {
1855   Module *M = FromBB->getModule();
1856   ValueLatticeElement Result =
1857       getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1858   // TODO: Should undef be allowed here?
1859   return Result.asConstantRange(V->getType(), /*UndefAllowed*/ true);
1860 }
1861 
1862 static Constant *getPredicateResult(CmpInst::Predicate Pred, Constant *C,
1863                                     const ValueLatticeElement &Val,
1864                                     const DataLayout &DL) {
1865   // If we know the value is a constant, evaluate the conditional.
1866   if (Val.isConstant())
1867     return ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL);
1868 
1869   Type *ResTy = CmpInst::makeCmpResultType(C->getType());
1870   if (Val.isConstantRange()) {
1871     const ConstantRange &CR = Val.getConstantRange();
1872     ConstantRange RHS = C->toConstantRange();
1873     if (CR.icmp(Pred, RHS))
1874       return ConstantInt::getTrue(ResTy);
1875     if (CR.icmp(CmpInst::getInversePredicate(Pred), RHS))
1876       return ConstantInt::getFalse(ResTy);
1877     return nullptr;
1878   }
1879 
1880   if (Val.isNotConstant()) {
1881     // If this is an equality comparison, we can try to fold it knowing that
1882     // "V != C1".
1883     if (Pred == ICmpInst::ICMP_EQ) {
1884       // !C1 == C -> false iff C1 == C.
1885       Constant *Res = ConstantFoldCompareInstOperands(
1886           ICmpInst::ICMP_NE, Val.getNotConstant(), C, DL);
1887       if (Res && Res->isNullValue())
1888         return ConstantInt::getFalse(ResTy);
1889     } else if (Pred == ICmpInst::ICMP_NE) {
1890       // !C1 != C -> true iff C1 == C.
1891       Constant *Res = ConstantFoldCompareInstOperands(
1892           ICmpInst::ICMP_NE, Val.getNotConstant(), C, DL);
1893       if (Res && Res->isNullValue())
1894         return ConstantInt::getTrue(ResTy);
1895     }
1896     return nullptr;
1897   }
1898 
1899   return nullptr;
1900 }
1901 
1902 /// Determine whether the specified value comparison with a constant is known to
1903 /// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
1904 Constant *LazyValueInfo::getPredicateOnEdge(CmpInst::Predicate Pred, Value *V,
1905                                             Constant *C, BasicBlock *FromBB,
1906                                             BasicBlock *ToBB,
1907                                             Instruction *CxtI) {
1908   Module *M = FromBB->getModule();
1909   ValueLatticeElement Result =
1910       getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1911 
1912   return getPredicateResult(Pred, C, Result, M->getDataLayout());
1913 }
1914 
/// Determine whether "V Pred C" is known true or false at CxtI. Returns a
/// true/false constant if known, or null. If the merged lattice value at
/// CxtI is not decisive, the predicate is re-checked separately along each
/// incoming edge of CxtI's block.
Constant *LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred, Value *V,
                                        Constant *C, Instruction *CxtI,
                                        bool UseBlockValue) {
  // Is or is not NonNull are common predicates being queried. If
  // isKnownNonZero can tell us the result of the predicate, we can
  // return it quickly. But this is only a fastpath, and falling
  // through would still be correct.
  Module *M = CxtI->getModule();
  const DataLayout &DL = M->getDataLayout();
  if (V->getType()->isPointerTy() && C->isNullValue() &&
      isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
    Type *ResTy = CmpInst::makeCmpResultType(C->getType());
    if (Pred == ICmpInst::ICMP_EQ)
      return ConstantInt::getFalse(ResTy);
    else if (Pred == ICmpInst::ICMP_NE)
      return ConstantInt::getTrue(ResTy);
  }

  // First attempt: fold the predicate against the (block or point) lattice
  // value of V at CxtI.
  auto &Impl = getOrCreateImpl(M);
  ValueLatticeElement Result =
      UseBlockValue ? Impl.getValueInBlock(V, CxtI->getParent(), CxtI)
                    : Impl.getValueAt(V, CxtI);
  Constant *Ret = getPredicateResult(Pred, C, Result, DL);
  if (Ret)
    return Ret;

  // Note: The following bit of code is somewhat distinct from the rest of LVI;
  // LVI as a whole tries to compute a lattice value which is conservatively
  // correct at a given location.  In this case, we have a predicate which we
  // weren't able to prove about the merged result, and we're pushing that
  // predicate back along each incoming edge to see if we can prove it
  // separately for each input.  As a motivating example, consider:
  // bb1:
  //   %v1 = ... ; constantrange<1, 5>
  //   br label %merge
  // bb2:
  //   %v2 = ... ; constantrange<10, 20>
  //   br label %merge
  // merge:
  //   %phi = phi [%v1, %v2] ; constantrange<1,20>
  //   %pred = icmp eq i32 %phi, 8
  // We can't tell from the lattice value for '%phi' that '%pred' is false
  // along each path, but by checking the predicate over each input separately,
  // we can.
  // We limit the search to one step backwards from the current BB and value.
  // We could consider extending this to search further backwards through the
  // CFG and/or value graph, but there are non-obvious compile time vs quality
  // tradeoffs.
  BasicBlock *BB = CxtI->getParent();

  // Function entry or an unreachable block.  Bail to avoid confusing
  // analysis below.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE)
    return nullptr;

  // If V is a PHI node in the same block as the context, we need to ask
  // questions about the predicate as applied to the incoming value along
  // each edge. This is useful for eliminating cases where the predicate is
  // known along all incoming edges.
  if (auto *PHI = dyn_cast<PHINode>(V))
    if (PHI->getParent() == BB) {
      Constant *Baseline = nullptr;
      for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
        Value *Incoming = PHI->getIncomingValue(i);
        BasicBlock *PredBB = PHI->getIncomingBlock(i);
        // Note that PredBB may be BB itself.
        Constant *Result =
            getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, CxtI);

        // Keep going as long as we've seen a consistent known result for
        // all inputs.
        Baseline = (i == 0) ? Result /* First iteration */
                            : (Baseline == Result ? Baseline
                                                  : nullptr); /* All others */
        if (!Baseline)
          break;
      }
      if (Baseline)
        return Baseline;
    }

  // For a comparison where the V is outside this block, it's possible
  // that we've branched on it before. Look to see if the value is known
  // on all incoming edges.
  if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
    // For predecessor edge, determine if the comparison is true or false
    // on that edge. If they're all true or all false, we can conclude
    // the value of the comparison in this block.
    Constant *Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
    if (Baseline) {
      // Check that all remaining incoming values match the first one.
      while (++PI != PE) {
        Constant *Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
        if (Ret != Baseline)
          break;
      }
      // If we terminated early, then one of the values didn't match.
      if (PI == PE) {
        return Baseline;
      }
    }
  }

  return nullptr;
}
2021 
2022 Constant *LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred, Value *LHS,
2023                                         Value *RHS, Instruction *CxtI,
2024                                         bool UseBlockValue) {
2025   if (auto *C = dyn_cast<Constant>(RHS))
2026     return getPredicateAt(Pred, LHS, C, CxtI, UseBlockValue);
2027   if (auto *C = dyn_cast<Constant>(LHS))
2028     return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
2029                           UseBlockValue);
2030 
2031   // Got two non-Constant values. Try to determine the comparison results based
2032   // on the block values of the two operands, e.g. because they have
2033   // non-overlapping ranges.
2034   if (UseBlockValue) {
2035     Module *M = CxtI->getModule();
2036     ValueLatticeElement L =
2037         getOrCreateImpl(M).getValueInBlock(LHS, CxtI->getParent(), CxtI);
2038     if (L.isOverdefined())
2039       return nullptr;
2040 
2041     ValueLatticeElement R =
2042         getOrCreateImpl(M).getValueInBlock(RHS, CxtI->getParent(), CxtI);
2043     Type *Ty = CmpInst::makeCmpResultType(LHS->getType());
2044     return L.getCompare(Pred, Ty, R, M->getDataLayout());
2045   }
2046   return nullptr;
2047 }
2048 
/// Forward edge-threading notification to the impl; a no-op when the impl
/// (and thus the cache) was never created.
void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                               BasicBlock *NewSucc) {
  if (auto *Impl = getImpl())
    Impl->threadEdge(PredBB, OldSucc, NewSucc);
}
2054 
/// Drop cached information about V; a no-op when no impl exists yet.
void LazyValueInfo::forgetValue(Value *V) {
  if (auto *Impl = getImpl())
    Impl->forgetValue(V);
}
2059 
/// Drop cached information about block BB; a no-op when no impl exists yet.
void LazyValueInfo::eraseBlock(BasicBlock *BB) {
  if (auto *Impl = getImpl())
    Impl->eraseBlock(BB);
}
2064 
/// Clear all cached lattice values; a no-op when no impl exists yet.
void LazyValueInfo::clear() {
  if (auto *Impl = getImpl())
    Impl->clear();
}
2069 
/// Print LVI results for F to OS; a no-op when no impl exists yet.
void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
  if (auto *Impl = getImpl())
    Impl->printLVI(F, DTree, OS);
}
2074 
2075 // Print the LVI for the function arguments at the start of each basic block.
2076 void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
2077     const BasicBlock *BB, formatted_raw_ostream &OS) {
2078   // Find if there are latticevalues defined for arguments of the function.
2079   auto *F = BB->getParent();
2080   for (const auto &Arg : F->args()) {
2081     ValueLatticeElement Result = LVIImpl->getValueInBlock(
2082         const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
2083     if (Result.isUnknown())
2084       continue;
2085     OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
2086   }
2087 }
2088 
2089 // This function prints the LVI analysis for the instruction I at the beginning
2090 // of various basic blocks. It relies on calculated values that are stored in
2091 // the LazyValueInfoCache, and in the absence of cached values, recalculate the
2092 // LazyValueInfo for `I`, and print that info.
2093 void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
2094     const Instruction *I, formatted_raw_ostream &OS) {
2095 
2096   auto *ParentBB = I->getParent();
2097   SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
2098   // We can generate (solve) LVI values only for blocks that are dominated by
2099   // the I's parent. However, to avoid generating LVI for all dominating blocks,
2100   // that contain redundant/uninteresting information, we print LVI for
2101   // blocks that may use this LVI information (such as immediate successor
2102   // blocks, and blocks that contain uses of `I`).
2103   auto printResult = [&](const BasicBlock *BB) {
2104     if (!BlocksContainingLVI.insert(BB).second)
2105       return;
2106     ValueLatticeElement Result = LVIImpl->getValueInBlock(
2107         const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
2108       OS << "; LatticeVal for: '" << *I << "' in BB: '";
2109       BB->printAsOperand(OS, false);
2110       OS << "' is: " << Result << "\n";
2111   };
2112 
2113   printResult(ParentBB);
2114   // Print the LVI analysis results for the immediate successor blocks, that
2115   // are dominated by `ParentBB`.
2116   for (const auto *BBSucc : successors(ParentBB))
2117     if (DT.dominates(ParentBB, BBSucc))
2118       printResult(BBSucc);
2119 
2120   // Print LVI in blocks where `I` is used.
2121   for (const auto *U : I->users())
2122     if (auto *UseI = dyn_cast<Instruction>(U))
2123       if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
2124         printResult(UseI->getParent());
2125 
2126 }
2127 
2128 PreservedAnalyses LazyValueInfoPrinterPass::run(Function &F,
2129                                                 FunctionAnalysisManager &AM) {
2130   OS << "LVI for function '" << F.getName() << "':\n";
2131   auto &LVI = AM.getResult<LazyValueAnalysis>(F);
2132   auto &DTree = AM.getResult<DominatorTreeAnalysis>(F);
2133   LVI.printLVI(F, DTree, OS);
2134   return PreservedAnalyses::all();
2135 }
2136