1 //===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interface for lazy computation of value constraint
10 // information.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/Analysis/LazyValueInfo.h"
15 #include "llvm/ADT/DenseSet.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/Analysis/AssumptionCache.h"
18 #include "llvm/Analysis/ConstantFolding.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/Analysis/TargetLibraryInfo.h"
21 #include "llvm/Analysis/ValueLattice.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/IR/AssemblyAnnotationWriter.h"
24 #include "llvm/IR/CFG.h"
25 #include "llvm/IR/ConstantRange.h"
26 #include "llvm/IR/Constants.h"
27 #include "llvm/IR/DataLayout.h"
28 #include "llvm/IR/Dominators.h"
29 #include "llvm/IR/InstrTypes.h"
30 #include "llvm/IR/Instructions.h"
31 #include "llvm/IR/IntrinsicInst.h"
32 #include "llvm/IR/Intrinsics.h"
33 #include "llvm/IR/LLVMContext.h"
34 #include "llvm/IR/Module.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/IR/ValueHandle.h"
37 #include "llvm/InitializePasses.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/FormattedStream.h"
40 #include "llvm/Support/KnownBits.h"
41 #include "llvm/Support/raw_ostream.h"
42 #include <optional>
43 using namespace llvm;
44 using namespace PatternMatch;
45
46 #define DEBUG_TYPE "lazy-value-info"
47
48 // This is the number of worklist items we will process to try to discover an
49 // answer for a given value.
50 static const unsigned MaxProcessedPerValue = 500;
51
// Unique address used as the legacy pass identifier.
char LazyValueInfoWrapperPass::ID = 0;
// Constructing the wrapper pass registers it (and its analysis dependencies,
// see the INITIALIZE_PASS_* macros below) with the global pass registry.
LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) {
  initializeLazyValueInfoWrapperPassPass(*PassRegistry::getPassRegistry());
}
56 INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
57 "Lazy Value Information Analysis", false, true)
58 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
59 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
60 INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
61 "Lazy Value Information Analysis", false, true)
62
63 namespace llvm {
createLazyValueInfoPass()64 FunctionPass *createLazyValueInfoPass() {
65 return new LazyValueInfoWrapperPass();
66 }
67 } // namespace llvm
68
69 AnalysisKey LazyValueAnalysis::Key;
70
71 /// Returns true if this lattice value represents at most one possible value.
72 /// This is as precise as any lattice value can get while still representing
73 /// reachable code.
hasSingleValue(const ValueLatticeElement & Val)74 static bool hasSingleValue(const ValueLatticeElement &Val) {
75 if (Val.isConstantRange() &&
76 Val.getConstantRange().isSingleElement())
77 // Integer constants are single element ranges
78 return true;
79 if (Val.isConstant())
80 // Non integer constants
81 return true;
82 return false;
83 }
84
85 /// Combine two sets of facts about the same value into a single set of
86 /// facts. Note that this method is not suitable for merging facts along
87 /// different paths in a CFG; that's what the mergeIn function is for. This
88 /// is for merging facts gathered about the same value at the same location
89 /// through two independent means.
90 /// Notes:
91 /// * This method does not promise to return the most precise possible lattice
92 /// value implied by A and B. It is allowed to return any lattice element
93 /// which is at least as strong as *either* A or B (unless our facts
94 /// conflict, see below).
95 /// * Due to unreachable code, the intersection of two lattice values could be
96 /// contradictory. If this happens, we return some valid lattice value so as
97 /// not confuse the rest of LVI. Ideally, we'd always return Undefined, but
98 /// we do not make this guarantee. TODO: This would be a useful enhancement.
intersect(const ValueLatticeElement & A,const ValueLatticeElement & B)99 static ValueLatticeElement intersect(const ValueLatticeElement &A,
100 const ValueLatticeElement &B) {
101 // Undefined is the strongest state. It means the value is known to be along
102 // an unreachable path.
103 if (A.isUnknown())
104 return A;
105 if (B.isUnknown())
106 return B;
107
108 // If we gave up for one, but got a useable fact from the other, use it.
109 if (A.isOverdefined())
110 return B;
111 if (B.isOverdefined())
112 return A;
113
114 // Can't get any more precise than constants.
115 if (hasSingleValue(A))
116 return A;
117 if (hasSingleValue(B))
118 return B;
119
120 // Could be either constant range or not constant here.
121 if (!A.isConstantRange() || !B.isConstantRange()) {
122 // TODO: Arbitrary choice, could be improved
123 return A;
124 }
125
126 // Intersect two constant ranges
127 ConstantRange Range =
128 A.getConstantRange().intersectWith(B.getConstantRange());
129 // Note: An empty range is implicitly converted to unknown or undef depending
130 // on MayIncludeUndef internally.
131 return ValueLatticeElement::getRange(
132 std::move(Range), /*MayIncludeUndef=*/A.isConstantRangeIncludingUndef() ||
133 B.isConstantRangeIncludingUndef());
134 }
135
136 //===----------------------------------------------------------------------===//
137 // LazyValueInfoCache Decl
138 //===----------------------------------------------------------------------===//
139
namespace {
/// A callback value handle updates the cache when values are erased.
class LazyValueInfoCache;
struct LVIValueHandle final : public CallbackVH {
  // The cache to notify when the tracked value dies; may be null for
  // handles used purely as lookup keys.
  LazyValueInfoCache *Parent;

  LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
      : CallbackVH(V), Parent(P) { }

  // Invoked by the value-handle machinery when the underlying value is
  // destroyed; flushes all cached facts about it (defined below).
  void deleted() override;
  // A replace-all-uses-with also invalidates every cached fact about the
  // value, so treat it exactly like deletion.
  void allUsesReplacedWith(Value *V) override {
    deleted();
  }
};
} // end anonymous namespace
155
156 namespace {
157 using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
158
159 /// This is the cache kept by LazyValueInfo which
160 /// maintains information about queries across the clients' queries.
161 class LazyValueInfoCache {
162 /// This is all of the cached information for one basic block. It contains
163 /// the per-value lattice elements, as well as a separate set for
164 /// overdefined values to reduce memory usage. Additionally pointers
165 /// dereferenced in the block are cached for nullability queries.
166 struct BlockCacheEntry {
167 SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
168 SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
169 // std::nullopt indicates that the nonnull pointers for this basic block
170 // block have not been computed yet.
171 std::optional<NonNullPointerSet> NonNullPointers;
172 };
173
174 /// Cached information per basic block.
175 DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
176 BlockCache;
177 /// Set of value handles used to erase values from the cache on deletion.
178 DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
179
getBlockEntry(BasicBlock * BB) const180 const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
181 auto It = BlockCache.find_as(BB);
182 if (It == BlockCache.end())
183 return nullptr;
184 return It->second.get();
185 }
186
getOrCreateBlockEntry(BasicBlock * BB)187 BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
188 auto It = BlockCache.find_as(BB);
189 if (It == BlockCache.end())
190 It = BlockCache.insert({BB, std::make_unique<BlockCacheEntry>()}).first;
191
192 return It->second.get();
193 }
194
addValueHandle(Value * Val)195 void addValueHandle(Value *Val) {
196 auto HandleIt = ValueHandles.find_as(Val);
197 if (HandleIt == ValueHandles.end())
198 ValueHandles.insert({Val, this});
199 }
200
201 public:
insertResult(Value * Val,BasicBlock * BB,const ValueLatticeElement & Result)202 void insertResult(Value *Val, BasicBlock *BB,
203 const ValueLatticeElement &Result) {
204 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
205
206 // Insert over-defined values into their own cache to reduce memory
207 // overhead.
208 if (Result.isOverdefined())
209 Entry->OverDefined.insert(Val);
210 else
211 Entry->LatticeElements.insert({Val, Result});
212
213 addValueHandle(Val);
214 }
215
getCachedValueInfo(Value * V,BasicBlock * BB) const216 std::optional<ValueLatticeElement> getCachedValueInfo(Value *V,
217 BasicBlock *BB) const {
218 const BlockCacheEntry *Entry = getBlockEntry(BB);
219 if (!Entry)
220 return std::nullopt;
221
222 if (Entry->OverDefined.count(V))
223 return ValueLatticeElement::getOverdefined();
224
225 auto LatticeIt = Entry->LatticeElements.find_as(V);
226 if (LatticeIt == Entry->LatticeElements.end())
227 return std::nullopt;
228
229 return LatticeIt->second;
230 }
231
232 bool
isNonNullAtEndOfBlock(Value * V,BasicBlock * BB,function_ref<NonNullPointerSet (BasicBlock *)> InitFn)233 isNonNullAtEndOfBlock(Value *V, BasicBlock *BB,
234 function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
235 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
236 if (!Entry->NonNullPointers) {
237 Entry->NonNullPointers = InitFn(BB);
238 for (Value *V : *Entry->NonNullPointers)
239 addValueHandle(V);
240 }
241
242 return Entry->NonNullPointers->count(V);
243 }
244
245 /// clear - Empty the cache.
clear()246 void clear() {
247 BlockCache.clear();
248 ValueHandles.clear();
249 }
250
251 /// Inform the cache that a given value has been deleted.
252 void eraseValue(Value *V);
253
254 /// This is part of the update interface to inform the cache
255 /// that a block has been deleted.
256 void eraseBlock(BasicBlock *BB);
257
258 /// Updates the cache to remove any influence an overdefined value in
259 /// OldSucc might have (unless also overdefined in NewSucc). This just
260 /// flushes elements from the cache and does not add any.
261 void threadEdgeImpl(BasicBlock *OldSucc, BasicBlock *NewSucc);
262 };
263 } // namespace
264
eraseValue(Value * V)265 void LazyValueInfoCache::eraseValue(Value *V) {
266 for (auto &Pair : BlockCache) {
267 Pair.second->LatticeElements.erase(V);
268 Pair.second->OverDefined.erase(V);
269 if (Pair.second->NonNullPointers)
270 Pair.second->NonNullPointers->erase(V);
271 }
272
273 auto HandleIt = ValueHandles.find_as(V);
274 if (HandleIt != ValueHandles.end())
275 ValueHandles.erase(HandleIt);
276 }
277
void LVIValueHandle::deleted() {
  // This erasure deallocates *this, so it MUST happen after we're done
  // using any and all members of *this. (eraseValue removes this handle
  // from the parent cache's ValueHandles set.)
  Parent->eraseValue(*this);
}
283
// Discard all cached facts for a deleted basic block.
void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
  BlockCache.erase(BB);
}
287
threadEdgeImpl(BasicBlock * OldSucc,BasicBlock * NewSucc)288 void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
289 BasicBlock *NewSucc) {
290 // When an edge in the graph has been threaded, values that we could not
291 // determine a value for before (i.e. were marked overdefined) may be
292 // possible to solve now. We do NOT try to proactively update these values.
293 // Instead, we clear their entries from the cache, and allow lazy updating to
294 // recompute them when needed.
295
296 // The updating process is fairly simple: we need to drop cached info
297 // for all values that were marked overdefined in OldSucc, and for those same
298 // values in any successor of OldSucc (except NewSucc) in which they were
299 // also marked overdefined.
300 std::vector<BasicBlock*> worklist;
301 worklist.push_back(OldSucc);
302
303 const BlockCacheEntry *Entry = getBlockEntry(OldSucc);
304 if (!Entry || Entry->OverDefined.empty())
305 return; // Nothing to process here.
306 SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
307 Entry->OverDefined.end());
308
309 // Use a worklist to perform a depth-first search of OldSucc's successors.
310 // NOTE: We do not need a visited list since any blocks we have already
311 // visited will have had their overdefined markers cleared already, and we
312 // thus won't loop to their successors.
313 while (!worklist.empty()) {
314 BasicBlock *ToUpdate = worklist.back();
315 worklist.pop_back();
316
317 // Skip blocks only accessible through NewSucc.
318 if (ToUpdate == NewSucc) continue;
319
320 // If a value was marked overdefined in OldSucc, and is here too...
321 auto OI = BlockCache.find_as(ToUpdate);
322 if (OI == BlockCache.end() || OI->second->OverDefined.empty())
323 continue;
324 auto &ValueSet = OI->second->OverDefined;
325
326 bool changed = false;
327 for (Value *V : ValsToClear) {
328 if (!ValueSet.erase(V))
329 continue;
330
331 // If we removed anything, then we potentially need to update
332 // blocks successors too.
333 changed = true;
334 }
335
336 if (!changed) continue;
337
338 llvm::append_range(worklist, successors(ToUpdate));
339 }
340 }
341
namespace llvm {
namespace {
/// An assembly annotator class to print LazyValueCache information in
/// comments.
class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
  // The implementation whose cached results we print.
  LazyValueInfoImpl *LVIImpl;
  // While analyzing which blocks we can solve values for, we need the dominator
  // information.
  DominatorTree &DT;

public:
  LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
      : LVIImpl(L), DT(DTree) {}

  // Emit an IR comment with LVI facts at the start of each basic block.
  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override;

  // Emit an IR comment with LVI facts after each instruction.
  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override;
};
} // namespace
// The actual implementation of the lazy analysis and update.  Note that the
// inheritance from LazyValueInfoCache is intended to be temporary while
// splitting the code and then transitioning to a has-a relationship.
class LazyValueInfoImpl {

  /// Cached results from previous queries
  LazyValueInfoCache TheCache;

  /// This stack holds the state of the value solver during a query.
  /// It basically emulates the callstack of the naive
  /// recursive value lookup process.
  SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;

  /// Keeps track of which block-value pairs are in BlockValueStack.
  DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;

  /// Push BV onto BlockValueStack unless it's already in there.
  /// Returns true on success.
  bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
    if (!BlockValueSet.insert(BV).second)
      return false;  // It's already in the stack.

    LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
                      << BV.first->getName() << "\n");
    BlockValueStack.push_back(BV);
    return true;
  }

  AssumptionCache *AC;  ///< A pointer to the cache of @llvm.assume calls.
  const DataLayout &DL; ///< A mandatory DataLayout

  /// Declaration of the llvm.experimental.guard() intrinsic,
  /// if it exists in the module.
  Function *GuardDecl;

  // Query the lattice value of Val at the end of BB (cached, or by pushing
  // work), refined by assumes/guards at CxtI when provided.
  std::optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
                                                   Instruction *CxtI);
  // Query the lattice value of V on the CFG edge F -> T.
  std::optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
                                                  BasicBlock *T,
                                                  Instruction *CxtI = nullptr);

  // These methods process one work item and may add more. A false value
  // returned means that the work item was not completely processed and must
  // be revisited after going through the new items.
  bool solveBlockValue(Value *Val, BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueImpl(Value *Val,
                                                         BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
                                                             BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
                                                            BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
                                                           BasicBlock *BB);
  std::optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
                                           BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
      Instruction *I, BasicBlock *BB,
      std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
          OpFn);
  std::optional<ValueLatticeElement>
  solveBlockValueBinaryOp(BinaryOperator *BBI, BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
                                                         BasicBlock *BB);
  std::optional<ValueLatticeElement>
  solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
                                                              BasicBlock *BB);
  std::optional<ValueLatticeElement>
  solveBlockValueInsertElement(InsertElementInst *IEI, BasicBlock *BB);
  std::optional<ValueLatticeElement>
  solveBlockValueExtractValue(ExtractValueInst *EVI, BasicBlock *BB);
  bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
  // Refine BBLV in place using assumes/guards valid at BBI.
  void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
                                                     ValueLatticeElement &BBLV,
                                                     Instruction *BBI);

  // Drain BlockValueStack until all pending work items are solved (or the
  // per-value work limit is hit).
  void solve();

  // For the following methods, if UseBlockValue is true, the function may
  // push additional values to the worklist and return nullopt. If
  // UseBlockValue is false, it will never return nullopt.

  std::optional<ValueLatticeElement>
  getValueFromSimpleICmpCondition(CmpInst::Predicate Pred, Value *RHS,
                                  const APInt &Offset, Instruction *CxtI,
                                  bool UseBlockValue);

  std::optional<ValueLatticeElement>
  getValueFromICmpCondition(Value *Val, ICmpInst *ICI, bool isTrueDest,
                            bool UseBlockValue);

  std::optional<ValueLatticeElement>
  getValueFromCondition(Value *Val, Value *Cond, bool IsTrueDest,
                        bool UseBlockValue, unsigned Depth = 0);

  std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
                                                       BasicBlock *BBFrom,
                                                       BasicBlock *BBTo,
                                                       bool UseBlockValue);

public:
  /// This is the query interface to determine the lattice value for the
  /// specified Value* at the context instruction (if specified) or at the
  /// start of the block.
  ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
                                      Instruction *CxtI = nullptr);

  /// This is the query interface to determine the lattice value for the
  /// specified Value* at the specified instruction using only information
  /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
  /// recursive query is performed.
  ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);

  /// This is the query interface to determine the lattice
  /// value for the specified Value* that is true on the specified edge.
  ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
                                     BasicBlock *ToBB,
                                     Instruction *CxtI = nullptr);

  // Query the lattice value of the value at a particular use site.
  ValueLatticeElement getValueAtUse(const Use &U);

  /// Complete flush all previously computed values
  void clear() {
    TheCache.clear();
  }

  /// Printing the LazyValueInfo Analysis.
  void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
    LazyValueInfoAnnotatedWriter Writer(this, DTree);
    F.print(OS, &Writer);
  }

  /// This is part of the update interface to remove information related to this
  /// value from the cache.
  void forgetValue(Value *V) { TheCache.eraseValue(V); }

  /// This is part of the update interface to inform the cache
  /// that a block has been deleted.
  void eraseBlock(BasicBlock *BB) {
    TheCache.eraseBlock(BB);
  }

  /// This is the update interface to inform the cache that an edge from
  /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
  void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);

  LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
                    Function *GuardDecl)
      : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
};
513 } // namespace llvm
514
// Drain the solver worklist. Each item on BlockValueStack is a
// (block, value) pair whose lattice value still needs to be computed; items
// may push dependencies and get revisited, so the stack shrinks only when an
// item is fully solved.
void LazyValueInfoImpl::solve() {
  // Snapshot of the items present when solving started; if we give up, these
  // are the queries that must still receive (overdefined) answers.
  SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
      BlockValueStack.begin(), BlockValueStack.end());

  unsigned processedCount = 0;
  while (!BlockValueStack.empty()) {
    processedCount++;
    // Abort if we have to process too many values to get a result for this one.
    // Because of the design of the overdefined cache currently being per-block
    // to avoid naming-related issues (IE it wants to try to give different
    // results for the same name in different blocks), overdefined results don't
    // get cached globally, which in turn means we will often try to rediscover
    // the same overdefined result again and again.  Once something like
    // PredicateInfo is used in LVI or CVP, we should be able to make the
    // overdefined cache global, and remove this throttle.
    if (processedCount > MaxProcessedPerValue) {
      LLVM_DEBUG(
          dbgs() << "Giving up on stack because we are getting too deep\n");
      // Fill in the original values (cache them as overdefined so callers of
      // solve() still find an answer for every query they issued).
      while (!StartingStack.empty()) {
        std::pair<BasicBlock *, Value *> &e = StartingStack.back();
        TheCache.insertResult(e.second, e.first,
                              ValueLatticeElement::getOverdefined());
        StartingStack.pop_back();
      }
      BlockValueSet.clear();
      BlockValueStack.clear();
      return;
    }
    std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
    assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
    // Remember the stack depth so we can assert on solveBlockValue's
    // push/no-push contract below.
    unsigned StackSize = BlockValueStack.size();
    (void) StackSize;

    if (solveBlockValue(e.second, e.first)) {
      // The work item was completely processed.
      assert(BlockValueStack.size() == StackSize &&
             BlockValueStack.back() == e && "Nothing should have been pushed!");
#ifndef NDEBUG
      std::optional<ValueLatticeElement> BBLV =
          TheCache.getCachedValueInfo(e.second, e.first);
      assert(BBLV && "Result should be in cache!");
      LLVM_DEBUG(
          dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
                 << *BBLV << "\n");
#endif

      BlockValueStack.pop_back();
      BlockValueSet.erase(e);
    } else {
      // More work needs to be done before revisiting.
      assert(BlockValueStack.size() == StackSize + 1 &&
             "Exactly one element should have been pushed!");
    }
  }
}
571
// Return the lattice value of Val at the end of BB (refined by
// assumes/guards at CxtI when given), or std::nullopt if the answer is not
// yet available and a work item was pushed to compute it.
std::optional<ValueLatticeElement>
LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB,
                                 Instruction *CxtI) {
  // If already a constant, there is nothing to compute.
  if (Constant *VC = dyn_cast<Constant>(Val))
    return ValueLatticeElement::get(VC);

  // Cached answers are refined with context-local facts before returning;
  // the cache itself stays context-independent.
  if (std::optional<ValueLatticeElement> OptLatticeVal =
          TheCache.getCachedValueInfo(Val, BB)) {
    intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
    return OptLatticeVal;
  }

  // We have hit a cycle, assume overdefined.
  if (!pushBlockValue({ BB, Val }))
    return ValueLatticeElement::getOverdefined();

  // Yet to be resolved.
  return std::nullopt;
}
592
getFromRangeMetadata(Instruction * BBI)593 static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
594 switch (BBI->getOpcode()) {
595 default:
596 break;
597 case Instruction::Call:
598 case Instruction::Invoke:
599 if (std::optional<ConstantRange> Range = cast<CallBase>(BBI)->getRange())
600 return ValueLatticeElement::getRange(*Range);
601 [[fallthrough]];
602 case Instruction::Load:
603 if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
604 if (isa<IntegerType>(BBI->getType())) {
605 return ValueLatticeElement::getRange(
606 getConstantRangeFromMetadata(*Ranges));
607 }
608 break;
609 };
610 // Nothing known - will be intersected with other facts
611 return ValueLatticeElement::getOverdefined();
612 }
613
solveBlockValue(Value * Val,BasicBlock * BB)614 bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
615 assert(!isa<Constant>(Val) && "Value should not be constant");
616 assert(!TheCache.getCachedValueInfo(Val, BB) &&
617 "Value should not be in cache");
618
619 // Hold off inserting this value into the Cache in case we have to return
620 // false and come back later.
621 std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
622 if (!Res)
623 // Work pushed, will revisit
624 return false;
625
626 TheCache.insertResult(Val, BB, *Res);
627 return true;
628 }
629
// Dispatch the per-instruction-kind solvers for Val in BB. Returns
// std::nullopt if a dependency was pushed and the caller must retry.
std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
  Instruction *BBI = dyn_cast<Instruction>(Val);
  // Values defined in another block (or non-instructions such as arguments)
  // are solved by merging over BB's incoming edges.
  if (!BBI || BBI->getParent() != BB)
    return solveBlockValueNonLocal(Val, BB);

  if (PHINode *PN = dyn_cast<PHINode>(BBI))
    return solveBlockValuePHINode(PN, BB);

  if (auto *SI = dyn_cast<SelectInst>(BBI))
    return solveBlockValueSelect(SI, BB);

  // If this value is a nonnull pointer, record it's range and bailout.  Note
  // that for all other pointer typed values, we terminate the search at the
  // definition.  We could easily extend this to look through geps, bitcasts,
  // and the like to prove non-nullness, but it's not clear that's worth it
  // compile time wise.  The context-insensitive value walk done inside
  // isKnownNonZero gets most of the profitable cases at much less expense.
  // This does mean that we have a sensitivity to where the defining
  // instruction is placed, even if it could legally be hoisted much higher.
  // That is unfortunate.
  PointerType *PT = dyn_cast<PointerType>(BBI->getType());
  if (PT && isKnownNonZero(BBI, DL))
    return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));

  // The remaining specialized solvers all operate on integer (vector) types.
  if (BBI->getType()->isIntOrIntVectorTy()) {
    if (auto *CI = dyn_cast<CastInst>(BBI))
      return solveBlockValueCast(CI, BB);

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
      return solveBlockValueBinaryOp(BO, BB);

    if (auto *IEI = dyn_cast<InsertElementInst>(BBI))
      return solveBlockValueInsertElement(IEI, BB);

    if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
      return solveBlockValueExtractValue(EVI, BB);

    if (auto *II = dyn_cast<IntrinsicInst>(BBI))
      return solveBlockValueIntrinsic(II, BB);
  }

  // Unhandled instruction kind: the best we can do is any attached range
  // attribute/metadata.
  LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                    << "' - unknown inst def found.\n");
  return getFromRangeMetadata(BBI);
}
676
AddNonNullPointer(Value * Ptr,NonNullPointerSet & PtrSet)677 static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet) {
678 // TODO: Use NullPointerIsDefined instead.
679 if (Ptr->getType()->getPointerAddressSpace() == 0)
680 PtrSet.insert(getUnderlyingObject(Ptr));
681 }
682
AddNonNullPointersByInstruction(Instruction * I,NonNullPointerSet & PtrSet)683 static void AddNonNullPointersByInstruction(
684 Instruction *I, NonNullPointerSet &PtrSet) {
685 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
686 AddNonNullPointer(L->getPointerOperand(), PtrSet);
687 } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
688 AddNonNullPointer(S->getPointerOperand(), PtrSet);
689 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
690 if (MI->isVolatile()) return;
691
692 // FIXME: check whether it has a valuerange that excludes zero?
693 ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
694 if (!Len || Len->isZero()) return;
695
696 AddNonNullPointer(MI->getRawDest(), PtrSet);
697 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
698 AddNonNullPointer(MTI->getRawSource(), PtrSet);
699 }
700 }
701
isNonNullAtEndOfBlock(Value * Val,BasicBlock * BB)702 bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
703 if (NullPointerIsDefined(BB->getParent(),
704 Val->getType()->getPointerAddressSpace()))
705 return false;
706
707 Val = Val->stripInBoundsOffsets();
708 return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
709 NonNullPointerSet NonNullPointers;
710 for (Instruction &I : *BB)
711 AddNonNullPointersByInstruction(&I, NonNullPointers);
712 return NonNullPointers;
713 });
714 }
715
// Solve a value not defined in BB by merging its value across all of BB's
// incoming edges. Returns std::nullopt if an unexplored predecessor was
// pushed onto the worklist.
std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) {
  ValueLatticeElement Result;  // Start Undefined.

  // If this is the entry block, we must be asking about an argument.
  if (BB->isEntryBlock()) {
    assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
    // A range attribute on the argument is the only fact available here.
    if (std::optional<ConstantRange> Range = cast<Argument>(Val)->getRange())
      return ValueLatticeElement::getRange(*Range);
    return ValueLatticeElement::getOverdefined();
  }

  // Loop over all of our predecessors, merging what we know from them into
  // result.  If we encounter an unexplored predecessor, we eagerly explore it
  // in a depth first manner.  In practice, this has the effect of discovering
  // paths we can't analyze eagerly without spending compile times analyzing
  // other paths.  This heuristic benefits from the fact that predecessors are
  // frequently arranged such that dominating ones come first and we quickly
  // find a path to function entry.  TODO: We should consider explicitly
  // canonicalizing to make this true rather than relying on this happy
  // accident.
  for (BasicBlock *Pred : predecessors(BB)) {
    std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
    if (!EdgeResult)
      // Explore that input, then return here
      return std::nullopt;

    Result.mergeIn(*EdgeResult);

    // If we hit overdefined, exit early.  The BlockVals entry is already set
    // to overdefined.
    if (Result.isOverdefined()) {
      LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                        << "' - overdefined because of pred '"
                        << Pred->getName() << "' (non local).\n");
      return Result;
    }
  }

  // Return the merged value, which is more precise than 'overdefined'.
  assert(!Result.isOverdefined());
  return Result;
}
759
// Solve a PHI node by merging the edge values of each incoming
// (value, block) pair. Returns std::nullopt if an unexplored input was
// pushed onto the worklist.
std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) {
  ValueLatticeElement Result;  // Start Undefined.

  // Loop over all of our predecessors, merging what we know from them into
  // result.  See the comment about the chosen traversal order in
  // solveBlockValueNonLocal; the same reasoning applies here.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *PhiBB = PN->getIncomingBlock(i);
    Value *PhiVal = PN->getIncomingValue(i);
    // Note that we can provide PN as the context value to getEdgeValue, even
    // though the results will be cached, because PN is the value being used as
    // the cache key in the caller.
    std::optional<ValueLatticeElement> EdgeResult =
        getEdgeValue(PhiVal, PhiBB, BB, PN);
    if (!EdgeResult)
      // Explore that input, then return here
      return std::nullopt;

    Result.mergeIn(*EdgeResult);

    // If we hit overdefined, exit early.  The BlockVals entry is already set
    // to overdefined.
    if (Result.isOverdefined()) {
      LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                        << "' - overdefined because of pred (local).\n");

      return Result;
    }
  }

  // Return the merged value, which is more precise than 'overdefined'.
  assert(!Result.isOverdefined() && "Possible PHI in entry block?");
  return Result;
}
795
796 // If we can determine a constraint on the value given conditions assumed by
797 // the program, intersect those constraints with BBLV
intersectAssumeOrGuardBlockValueConstantRange(Value * Val,ValueLatticeElement & BBLV,Instruction * BBI)798 void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
799 Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
800 BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
801 if (!BBI)
802 return;
803
804 BasicBlock *BB = BBI->getParent();
805 for (auto &AssumeVH : AC->assumptionsFor(Val)) {
806 if (!AssumeVH)
807 continue;
808
809 // Only check assumes in the block of the context instruction. Other
810 // assumes will have already been taken into account when the value was
811 // propagated from predecessor blocks.
812 auto *I = cast<CallInst>(AssumeVH);
813 if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))
814 continue;
815
816 BBLV = intersect(BBLV, *getValueFromCondition(Val, I->getArgOperand(0),
817 /*IsTrueDest*/ true,
818 /*UseBlockValue*/ false));
819 }
820
821 // If guards are not used in the module, don't spend time looking for them
822 if (GuardDecl && !GuardDecl->use_empty() &&
823 BBI->getIterator() != BB->begin()) {
824 for (Instruction &I :
825 make_range(std::next(BBI->getIterator().getReverse()), BB->rend())) {
826 Value *Cond = nullptr;
827 if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
828 BBLV = intersect(BBLV,
829 *getValueFromCondition(Val, Cond, /*IsTrueDest*/ true,
830 /*UseBlockValue*/ false));
831 }
832 }
833
834 if (BBLV.isOverdefined()) {
835 // Check whether we're checking at the terminator, and the pointer has
836 // been dereferenced in this block.
837 PointerType *PTy = dyn_cast<PointerType>(Val->getType());
838 if (PTy && BB->getTerminator() == BBI &&
839 isNonNullAtEndOfBlock(Val, BB))
840 BBLV = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
841 }
842 }
843
844 std::optional<ValueLatticeElement>
solveBlockValueSelect(SelectInst * SI,BasicBlock * BB)845 LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
846 // Recurse on our inputs if needed
847 std::optional<ValueLatticeElement> OptTrueVal =
848 getBlockValue(SI->getTrueValue(), BB, SI);
849 if (!OptTrueVal)
850 return std::nullopt;
851 ValueLatticeElement &TrueVal = *OptTrueVal;
852
853 std::optional<ValueLatticeElement> OptFalseVal =
854 getBlockValue(SI->getFalseValue(), BB, SI);
855 if (!OptFalseVal)
856 return std::nullopt;
857 ValueLatticeElement &FalseVal = *OptFalseVal;
858
859 if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
860 const ConstantRange &TrueCR = TrueVal.asConstantRange(SI->getType());
861 const ConstantRange &FalseCR = FalseVal.asConstantRange(SI->getType());
862 Value *LHS = nullptr;
863 Value *RHS = nullptr;
864 SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
865 // Is this a min specifically of our two inputs? (Avoid the risk of
866 // ValueTracking getting smarter looking back past our immediate inputs.)
867 if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
868 ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
869 (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
870 ConstantRange ResultCR = [&]() {
871 switch (SPR.Flavor) {
872 default:
873 llvm_unreachable("unexpected minmax type!");
874 case SPF_SMIN: /// Signed minimum
875 return TrueCR.smin(FalseCR);
876 case SPF_UMIN: /// Unsigned minimum
877 return TrueCR.umin(FalseCR);
878 case SPF_SMAX: /// Signed maximum
879 return TrueCR.smax(FalseCR);
880 case SPF_UMAX: /// Unsigned maximum
881 return TrueCR.umax(FalseCR);
882 };
883 }();
884 return ValueLatticeElement::getRange(
885 ResultCR, TrueVal.isConstantRangeIncludingUndef() ||
886 FalseVal.isConstantRangeIncludingUndef());
887 }
888
889 if (SPR.Flavor == SPF_ABS) {
890 if (LHS == SI->getTrueValue())
891 return ValueLatticeElement::getRange(
892 TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef());
893 if (LHS == SI->getFalseValue())
894 return ValueLatticeElement::getRange(
895 FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef());
896 }
897
898 if (SPR.Flavor == SPF_NABS) {
899 ConstantRange Zero(APInt::getZero(TrueCR.getBitWidth()));
900 if (LHS == SI->getTrueValue())
901 return ValueLatticeElement::getRange(
902 Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
903 if (LHS == SI->getFalseValue())
904 return ValueLatticeElement::getRange(
905 Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef());
906 }
907 }
908
909 // Can we constrain the facts about the true and false values by using the
910 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
911 // TODO: We could potentially refine an overdefined true value above.
912 Value *Cond = SI->getCondition();
913 // If the value is undef, a different value may be chosen in
914 // the select condition.
915 if (isGuaranteedNotToBeUndef(Cond, AC)) {
916 TrueVal =
917 intersect(TrueVal, *getValueFromCondition(SI->getTrueValue(), Cond,
918 /*IsTrueDest*/ true,
919 /*UseBlockValue*/ false));
920 FalseVal =
921 intersect(FalseVal, *getValueFromCondition(SI->getFalseValue(), Cond,
922 /*IsTrueDest*/ false,
923 /*UseBlockValue*/ false));
924 }
925
926 ValueLatticeElement Result = TrueVal;
927 Result.mergeIn(FalseVal);
928 return Result;
929 }
930
931 std::optional<ConstantRange>
getRangeFor(Value * V,Instruction * CxtI,BasicBlock * BB)932 LazyValueInfoImpl::getRangeFor(Value *V, Instruction *CxtI, BasicBlock *BB) {
933 std::optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
934 if (!OptVal)
935 return std::nullopt;
936 return OptVal->asConstantRange(V->getType());
937 }
938
939 std::optional<ValueLatticeElement>
solveBlockValueCast(CastInst * CI,BasicBlock * BB)940 LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
941 // Filter out casts we don't know how to reason about before attempting to
942 // recurse on our operand. This can cut a long search short if we know we're
943 // not going to be able to get any useful information anways.
944 switch (CI->getOpcode()) {
945 case Instruction::Trunc:
946 case Instruction::SExt:
947 case Instruction::ZExt:
948 break;
949 default:
950 // Unhandled instructions are overdefined.
951 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
952 << "' - overdefined (unknown cast).\n");
953 return ValueLatticeElement::getOverdefined();
954 }
955
956 // Figure out the range of the LHS. If that fails, we still apply the
957 // transfer rule on the full set since we may be able to locally infer
958 // interesting facts.
959 std::optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
960 if (!LHSRes)
961 // More work to do before applying this transfer rule.
962 return std::nullopt;
963 const ConstantRange &LHSRange = *LHSRes;
964
965 const unsigned ResultBitWidth = CI->getType()->getScalarSizeInBits();
966
967 // NOTE: We're currently limited by the set of operations that ConstantRange
968 // can evaluate symbolically. Enhancing that set will allows us to analyze
969 // more definitions.
970 return ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(),
971 ResultBitWidth));
972 }
973
974 std::optional<ValueLatticeElement>
solveBlockValueBinaryOpImpl(Instruction * I,BasicBlock * BB,std::function<ConstantRange (const ConstantRange &,const ConstantRange &)> OpFn)975 LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
976 Instruction *I, BasicBlock *BB,
977 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
978 OpFn) {
979 // Figure out the ranges of the operands. If that fails, use a
980 // conservative range, but apply the transfer rule anyways. This
981 // lets us pick up facts from expressions like "and i32 (call i32
982 // @foo()), 32"
983 std::optional<ConstantRange> LHSRes = getRangeFor(I->getOperand(0), I, BB);
984 if (!LHSRes)
985 return std::nullopt;
986
987 std::optional<ConstantRange> RHSRes = getRangeFor(I->getOperand(1), I, BB);
988 if (!RHSRes)
989 return std::nullopt;
990
991 const ConstantRange &LHSRange = *LHSRes;
992 const ConstantRange &RHSRange = *RHSRes;
993 return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
994 }
995
996 std::optional<ValueLatticeElement>
solveBlockValueBinaryOp(BinaryOperator * BO,BasicBlock * BB)997 LazyValueInfoImpl::solveBlockValueBinaryOp(BinaryOperator *BO, BasicBlock *BB) {
998 assert(BO->getOperand(0)->getType()->isSized() &&
999 "all operands to binary operators are sized");
1000 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
1001 unsigned NoWrapKind = OBO->getNoWrapKind();
1002 return solveBlockValueBinaryOpImpl(
1003 BO, BB,
1004 [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
1005 return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind);
1006 });
1007 }
1008
1009 return solveBlockValueBinaryOpImpl(
1010 BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
1011 return CR1.binaryOp(BO->getOpcode(), CR2);
1012 });
1013 }
1014
1015 std::optional<ValueLatticeElement>
solveBlockValueOverflowIntrinsic(WithOverflowInst * WO,BasicBlock * BB)1016 LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
1017 BasicBlock *BB) {
1018 return solveBlockValueBinaryOpImpl(
1019 WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
1020 return CR1.binaryOp(WO->getBinaryOp(), CR2);
1021 });
1022 }
1023
1024 std::optional<ValueLatticeElement>
solveBlockValueIntrinsic(IntrinsicInst * II,BasicBlock * BB)1025 LazyValueInfoImpl::solveBlockValueIntrinsic(IntrinsicInst *II, BasicBlock *BB) {
1026 ValueLatticeElement MetadataVal = getFromRangeMetadata(II);
1027 if (!ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
1028 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1029 << "' - unknown intrinsic.\n");
1030 return MetadataVal;
1031 }
1032
1033 SmallVector<ConstantRange, 2> OpRanges;
1034 for (Value *Op : II->args()) {
1035 std::optional<ConstantRange> Range = getRangeFor(Op, II, BB);
1036 if (!Range)
1037 return std::nullopt;
1038 OpRanges.push_back(*Range);
1039 }
1040
1041 return intersect(ValueLatticeElement::getRange(ConstantRange::intrinsic(
1042 II->getIntrinsicID(), OpRanges)),
1043 MetadataVal);
1044 }
1045
1046 std::optional<ValueLatticeElement>
solveBlockValueInsertElement(InsertElementInst * IEI,BasicBlock * BB)1047 LazyValueInfoImpl::solveBlockValueInsertElement(InsertElementInst *IEI,
1048 BasicBlock *BB) {
1049 std::optional<ValueLatticeElement> OptEltVal =
1050 getBlockValue(IEI->getOperand(1), BB, IEI);
1051 if (!OptEltVal)
1052 return std::nullopt;
1053 ValueLatticeElement &Res = *OptEltVal;
1054
1055 std::optional<ValueLatticeElement> OptVecVal =
1056 getBlockValue(IEI->getOperand(0), BB, IEI);
1057 if (!OptVecVal)
1058 return std::nullopt;
1059
1060 Res.mergeIn(*OptVecVal);
1061 return Res;
1062 }
1063
1064 std::optional<ValueLatticeElement>
solveBlockValueExtractValue(ExtractValueInst * EVI,BasicBlock * BB)1065 LazyValueInfoImpl::solveBlockValueExtractValue(ExtractValueInst *EVI,
1066 BasicBlock *BB) {
1067 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1068 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1069 return solveBlockValueOverflowIntrinsic(WO, BB);
1070
1071 // Handle extractvalue of insertvalue to allow further simplification
1072 // based on replaced with.overflow intrinsics.
1073 if (Value *V = simplifyExtractValueInst(
1074 EVI->getAggregateOperand(), EVI->getIndices(),
1075 EVI->getDataLayout()))
1076 return getBlockValue(V, BB, EVI);
1077
1078 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1079 << "' - overdefined (unknown extractvalue).\n");
1080 return ValueLatticeElement::getOverdefined();
1081 }
1082
matchICmpOperand(APInt & Offset,Value * LHS,Value * Val,ICmpInst::Predicate Pred)1083 static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
1084 ICmpInst::Predicate Pred) {
1085 if (LHS == Val)
1086 return true;
1087
1088 // Handle range checking idiom produced by InstCombine. We will subtract the
1089 // offset from the allowed range for RHS in this case.
1090 const APInt *C;
1091 if (match(LHS, m_AddLike(m_Specific(Val), m_APInt(C)))) {
1092 Offset = *C;
1093 return true;
1094 }
1095
1096 // Handle the symmetric case. This appears in saturation patterns like
1097 // (x == 16) ? 16 : (x + 1).
1098 if (match(Val, m_AddLike(m_Specific(LHS), m_APInt(C)))) {
1099 Offset = -*C;
1100 return true;
1101 }
1102
1103 // If (x | y) < C, then (x < C) && (y < C).
1104 if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
1105 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1106 return true;
1107
1108 // If (x & y) > C, then (x > C) && (y > C).
1109 if (match(LHS, m_c_And(m_Specific(Val), m_Value())) &&
1110 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
1111 return true;
1112
1113 return false;
1114 }
1115
1116 /// Get value range for a "(Val + Offset) Pred RHS" condition.
1117 std::optional<ValueLatticeElement>
getValueFromSimpleICmpCondition(CmpInst::Predicate Pred,Value * RHS,const APInt & Offset,Instruction * CxtI,bool UseBlockValue)1118 LazyValueInfoImpl::getValueFromSimpleICmpCondition(CmpInst::Predicate Pred,
1119 Value *RHS,
1120 const APInt &Offset,
1121 Instruction *CxtI,
1122 bool UseBlockValue) {
1123 ConstantRange RHSRange(RHS->getType()->getScalarSizeInBits(),
1124 /*isFullSet=*/true);
1125 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
1126 RHSRange = ConstantRange(CI->getValue());
1127 } else if (UseBlockValue) {
1128 std::optional<ValueLatticeElement> R =
1129 getBlockValue(RHS, CxtI->getParent(), CxtI);
1130 if (!R)
1131 return std::nullopt;
1132 RHSRange = R->asConstantRange(RHS->getType());
1133 }
1134
1135 ConstantRange TrueValues =
1136 ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
1137 return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
1138 }
1139
1140 static std::optional<ConstantRange>
getRangeViaSLT(CmpInst::Predicate Pred,APInt RHS,function_ref<std::optional<ConstantRange> (const APInt &)> Fn)1141 getRangeViaSLT(CmpInst::Predicate Pred, APInt RHS,
1142 function_ref<std::optional<ConstantRange>(const APInt &)> Fn) {
1143 bool Invert = false;
1144 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
1145 Pred = ICmpInst::getInversePredicate(Pred);
1146 Invert = true;
1147 }
1148 if (Pred == ICmpInst::ICMP_SLE) {
1149 Pred = ICmpInst::ICMP_SLT;
1150 if (RHS.isMaxSignedValue())
1151 return std::nullopt; // Could also return full/empty here, if we wanted.
1152 ++RHS;
1153 }
1154 assert(Pred == ICmpInst::ICMP_SLT && "Must be signed predicate");
1155 if (auto CR = Fn(RHS))
1156 return Invert ? CR->inverse() : CR;
1157 return std::nullopt;
1158 }
1159
getValueFromICmpCondition(Value * Val,ICmpInst * ICI,bool isTrueDest,bool UseBlockValue)1160 std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
1161 Value *Val, ICmpInst *ICI, bool isTrueDest, bool UseBlockValue) {
1162 Value *LHS = ICI->getOperand(0);
1163 Value *RHS = ICI->getOperand(1);
1164
1165 // Get the predicate that must hold along the considered edge.
1166 CmpInst::Predicate EdgePred =
1167 isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();
1168
1169 if (isa<Constant>(RHS)) {
1170 if (ICI->isEquality() && LHS == Val) {
1171 if (EdgePred == ICmpInst::ICMP_EQ)
1172 return ValueLatticeElement::get(cast<Constant>(RHS));
1173 else if (!isa<UndefValue>(RHS))
1174 return ValueLatticeElement::getNot(cast<Constant>(RHS));
1175 }
1176 }
1177
1178 Type *Ty = Val->getType();
1179 if (!Ty->isIntegerTy())
1180 return ValueLatticeElement::getOverdefined();
1181
1182 unsigned BitWidth = Ty->getScalarSizeInBits();
1183 APInt Offset(BitWidth, 0);
1184 if (matchICmpOperand(Offset, LHS, Val, EdgePred))
1185 return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset, ICI,
1186 UseBlockValue);
1187
1188 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(EdgePred);
1189 if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
1190 return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset, ICI,
1191 UseBlockValue);
1192
1193 const APInt *Mask, *C;
1194 if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
1195 match(RHS, m_APInt(C))) {
1196 // If (Val & Mask) == C then all the masked bits are known and we can
1197 // compute a value range based on that.
1198 if (EdgePred == ICmpInst::ICMP_EQ) {
1199 KnownBits Known;
1200 Known.Zero = ~*C & *Mask;
1201 Known.One = *C & *Mask;
1202 return ValueLatticeElement::getRange(
1203 ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
1204 }
1205
1206 if (EdgePred == ICmpInst::ICMP_NE)
1207 return ValueLatticeElement::getRange(
1208 ConstantRange::makeMaskNotEqualRange(*Mask, *C));
1209 }
1210
1211 // If (X urem Modulus) >= C, then X >= C.
1212 // If trunc X >= C, then X >= C.
1213 // TODO: An upper bound could be computed as well.
1214 if (match(LHS, m_CombineOr(m_URem(m_Specific(Val), m_Value()),
1215 m_Trunc(m_Specific(Val)))) &&
1216 match(RHS, m_APInt(C))) {
1217 // Use the icmp region so we don't have to deal with different predicates.
1218 ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
1219 if (!CR.isEmptySet())
1220 return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
1221 CR.getUnsignedMin().zext(BitWidth), APInt(BitWidth, 0)));
1222 }
1223
1224 // Recognize:
1225 // icmp slt (ashr X, ShAmtC), C --> icmp slt X, C << ShAmtC
1226 // Preconditions: (C << ShAmtC) >> ShAmtC == C
1227 const APInt *ShAmtC;
1228 if (CmpInst::isSigned(EdgePred) &&
1229 match(LHS, m_AShr(m_Specific(Val), m_APInt(ShAmtC))) &&
1230 match(RHS, m_APInt(C))) {
1231 auto CR = getRangeViaSLT(
1232 EdgePred, *C, [&](const APInt &RHS) -> std::optional<ConstantRange> {
1233 APInt New = RHS << *ShAmtC;
1234 if ((New.ashr(*ShAmtC)) != RHS)
1235 return std::nullopt;
1236 return ConstantRange::getNonEmpty(
1237 APInt::getSignedMinValue(New.getBitWidth()), New);
1238 });
1239 if (CR)
1240 return ValueLatticeElement::getRange(*CR);
1241 }
1242
1243 return ValueLatticeElement::getOverdefined();
1244 }
1245
1246 // Handle conditions of the form
1247 // extractvalue(op.with.overflow(%x, C), 1).
getValueFromOverflowCondition(Value * Val,WithOverflowInst * WO,bool IsTrueDest)1248 static ValueLatticeElement getValueFromOverflowCondition(
1249 Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1250 // TODO: This only works with a constant RHS for now. We could also compute
1251 // the range of the RHS, but this doesn't fit into the current structure of
1252 // the edge value calculation.
1253 const APInt *C;
1254 if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
1255 return ValueLatticeElement::getOverdefined();
1256
1257 // Calculate the possible values of %x for which no overflow occurs.
1258 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
1259 WO->getBinaryOp(), *C, WO->getNoWrapKind());
1260
1261 // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1262 // constrained to it's inverse (all values that might cause overflow).
1263 if (IsTrueDest)
1264 NWR = NWR.inverse();
1265 return ValueLatticeElement::getRange(NWR);
1266 }
1267
1268 std::optional<ValueLatticeElement>
getValueFromCondition(Value * Val,Value * Cond,bool IsTrueDest,bool UseBlockValue,unsigned Depth)1269 LazyValueInfoImpl::getValueFromCondition(Value *Val, Value *Cond,
1270 bool IsTrueDest, bool UseBlockValue,
1271 unsigned Depth) {
1272 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
1273 return getValueFromICmpCondition(Val, ICI, IsTrueDest, UseBlockValue);
1274
1275 if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
1276 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1277 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
1278 return getValueFromOverflowCondition(Val, WO, IsTrueDest);
1279
1280 if (++Depth == MaxAnalysisRecursionDepth)
1281 return ValueLatticeElement::getOverdefined();
1282
1283 Value *N;
1284 if (match(Cond, m_Not(m_Value(N))))
1285 return getValueFromCondition(Val, N, !IsTrueDest, UseBlockValue, Depth);
1286
1287 Value *L, *R;
1288 bool IsAnd;
1289 if (match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))))
1290 IsAnd = true;
1291 else if (match(Cond, m_LogicalOr(m_Value(L), m_Value(R))))
1292 IsAnd = false;
1293 else
1294 return ValueLatticeElement::getOverdefined();
1295
1296 std::optional<ValueLatticeElement> LV =
1297 getValueFromCondition(Val, L, IsTrueDest, UseBlockValue, Depth);
1298 if (!LV)
1299 return std::nullopt;
1300 std::optional<ValueLatticeElement> RV =
1301 getValueFromCondition(Val, R, IsTrueDest, UseBlockValue, Depth);
1302 if (!RV)
1303 return std::nullopt;
1304
1305 // if (L && R) -> intersect L and R
1306 // if (!(L || R)) -> intersect !L and !R
1307 // if (L || R) -> union L and R
1308 // if (!(L && R)) -> union !L and !R
1309 if (IsTrueDest ^ IsAnd) {
1310 LV->mergeIn(*RV);
1311 return *LV;
1312 }
1313
1314 return intersect(*LV, *RV);
1315 }
1316
1317 // Return true if Usr has Op as an operand, otherwise false.
usesOperand(User * Usr,Value * Op)1318 static bool usesOperand(User *Usr, Value *Op) {
1319 return is_contained(Usr->operands(), Op);
1320 }
1321
1322 // Return true if the instruction type of Val is supported by
1323 // constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
1324 // Call this before calling constantFoldUser() to find out if it's even worth
1325 // attempting to call it.
isOperationFoldable(User * Usr)1326 static bool isOperationFoldable(User *Usr) {
1327 return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
1328 }
1329
1330 // Check if Usr can be simplified to an integer constant when the value of one
1331 // of its operands Op is an integer constant OpConstVal. If so, return it as an
1332 // lattice value range with a single element or otherwise return an overdefined
1333 // lattice value.
constantFoldUser(User * Usr,Value * Op,const APInt & OpConstVal,const DataLayout & DL)1334 static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
1335 const APInt &OpConstVal,
1336 const DataLayout &DL) {
1337 assert(isOperationFoldable(Usr) && "Precondition");
1338 Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
1339 // Check if Usr can be simplified to a constant.
1340 if (auto *CI = dyn_cast<CastInst>(Usr)) {
1341 assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
1342 if (auto *C = dyn_cast_or_null<ConstantInt>(
1343 simplifyCastInst(CI->getOpcode(), OpConst,
1344 CI->getDestTy(), DL))) {
1345 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1346 }
1347 } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
1348 bool Op0Match = BO->getOperand(0) == Op;
1349 bool Op1Match = BO->getOperand(1) == Op;
1350 assert((Op0Match || Op1Match) &&
1351 "Operand 0 nor Operand 1 isn't a match");
1352 Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
1353 Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
1354 if (auto *C = dyn_cast_or_null<ConstantInt>(
1355 simplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
1356 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1357 }
1358 } else if (isa<FreezeInst>(Usr)) {
1359 assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
1360 return ValueLatticeElement::getRange(ConstantRange(OpConstVal));
1361 }
1362 return ValueLatticeElement::getOverdefined();
1363 }
1364
1365 /// Compute the value of Val on the edge BBFrom -> BBTo.
1366 std::optional<ValueLatticeElement>
getEdgeValueLocal(Value * Val,BasicBlock * BBFrom,BasicBlock * BBTo,bool UseBlockValue)1367 LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
1368 BasicBlock *BBTo, bool UseBlockValue) {
1369 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1370 // know that v != 0.
1371 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1372 // If this is a conditional branch and only one successor goes to BBTo, then
1373 // we may be able to infer something from the condition.
1374 if (BI->isConditional() &&
1375 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1376 bool isTrueDest = BI->getSuccessor(0) == BBTo;
1377 assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1378 "BBTo isn't a successor of BBFrom");
1379 Value *Condition = BI->getCondition();
1380
1381 // If V is the condition of the branch itself, then we know exactly what
1382 // it is.
1383 // NB: The condition on a `br` can't be a vector type.
1384 if (Condition == Val)
1385 return ValueLatticeElement::get(ConstantInt::get(
1386 Type::getInt1Ty(Val->getContext()), isTrueDest));
1387
1388 // If the condition of the branch is an equality comparison, we may be
1389 // able to infer the value.
1390 std::optional<ValueLatticeElement> Result =
1391 getValueFromCondition(Val, Condition, isTrueDest, UseBlockValue);
1392 if (!Result)
1393 return std::nullopt;
1394
1395 if (!Result->isOverdefined())
1396 return Result;
1397
1398 if (User *Usr = dyn_cast<User>(Val)) {
1399 assert(Result->isOverdefined() && "Result isn't overdefined");
1400 // Check with isOperationFoldable() first to avoid linearly iterating
1401 // over the operands unnecessarily which can be expensive for
1402 // instructions with many operands.
1403 if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
1404 const DataLayout &DL = BBTo->getDataLayout();
1405 if (usesOperand(Usr, Condition)) {
1406 // If Val has Condition as an operand and Val can be folded into a
1407 // constant with either Condition == true or Condition == false,
1408 // propagate the constant.
1409 // eg.
1410 // ; %Val is true on the edge to %then.
1411 // %Val = and i1 %Condition, true.
1412 // br %Condition, label %then, label %else
1413 APInt ConditionVal(1, isTrueDest ? 1 : 0);
1414 Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
1415 } else {
1416 // If one of Val's operand has an inferred value, we may be able to
1417 // infer the value of Val.
1418 // eg.
1419 // ; %Val is 94 on the edge to %then.
1420 // %Val = add i8 %Op, 1
1421 // %Condition = icmp eq i8 %Op, 93
1422 // br i1 %Condition, label %then, label %else
1423 for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
1424 Value *Op = Usr->getOperand(i);
1425 ValueLatticeElement OpLatticeVal = *getValueFromCondition(
1426 Op, Condition, isTrueDest, /*UseBlockValue*/ false);
1427 if (std::optional<APInt> OpConst =
1428 OpLatticeVal.asConstantInteger()) {
1429 Result = constantFoldUser(Usr, Op, *OpConst, DL);
1430 break;
1431 }
1432 }
1433 }
1434 }
1435 }
1436 if (!Result->isOverdefined())
1437 return Result;
1438 }
1439 }
1440
1441 // If the edge was formed by a switch on the value, then we may know exactly
1442 // what it is.
1443 if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
1444 Value *Condition = SI->getCondition();
1445 if (!isa<IntegerType>(Val->getType()))
1446 return ValueLatticeElement::getOverdefined();
1447 bool ValUsesConditionAndMayBeFoldable = false;
1448 if (Condition != Val) {
1449 // Check if Val has Condition as an operand.
1450 if (User *Usr = dyn_cast<User>(Val))
1451 ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
1452 usesOperand(Usr, Condition);
1453 if (!ValUsesConditionAndMayBeFoldable)
1454 return ValueLatticeElement::getOverdefined();
1455 }
1456 assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
1457 "Condition != Val nor Val doesn't use Condition");
1458
1459 bool DefaultCase = SI->getDefaultDest() == BBTo;
1460 unsigned BitWidth = Val->getType()->getIntegerBitWidth();
1461 ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
1462
1463 for (auto Case : SI->cases()) {
1464 APInt CaseValue = Case.getCaseValue()->getValue();
1465 ConstantRange EdgeVal(CaseValue);
1466 if (ValUsesConditionAndMayBeFoldable) {
1467 User *Usr = cast<User>(Val);
1468 const DataLayout &DL = BBTo->getDataLayout();
1469 ValueLatticeElement EdgeLatticeVal =
1470 constantFoldUser(Usr, Condition, CaseValue, DL);
1471 if (EdgeLatticeVal.isOverdefined())
1472 return ValueLatticeElement::getOverdefined();
1473 EdgeVal = EdgeLatticeVal.getConstantRange();
1474 }
1475 if (DefaultCase) {
1476 // It is possible that the default destination is the destination of
1477 // some cases. We cannot perform difference for those cases.
1478 // We know Condition != CaseValue in BBTo. In some cases we can use
1479 // this to infer Val == f(Condition) is != f(CaseValue). For now, we
1480 // only do this when f is identity (i.e. Val == Condition), but we
1481 // should be able to do this for any injective f.
1482 if (Case.getCaseSuccessor() != BBTo && Condition == Val)
1483 EdgesVals = EdgesVals.difference(EdgeVal);
1484 } else if (Case.getCaseSuccessor() == BBTo)
1485 EdgesVals = EdgesVals.unionWith(EdgeVal);
1486 }
1487 return ValueLatticeElement::getRange(std::move(EdgesVals));
1488 }
1489 return ValueLatticeElement::getOverdefined();
1490 }
1491
1492 /// Compute the value of Val on the edge BBFrom -> BBTo or the value at
1493 /// the basic block if the edge does not constrain Val.
1494 std::optional<ValueLatticeElement>
getEdgeValue(Value * Val,BasicBlock * BBFrom,BasicBlock * BBTo,Instruction * CxtI)1495 LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
1496 BasicBlock *BBTo, Instruction *CxtI) {
1497 // If already a constant, there is nothing to compute.
1498 if (Constant *VC = dyn_cast<Constant>(Val))
1499 return ValueLatticeElement::get(VC);
1500
1501 std::optional<ValueLatticeElement> LocalResult =
1502 getEdgeValueLocal(Val, BBFrom, BBTo, /*UseBlockValue*/ true);
1503 if (!LocalResult)
1504 return std::nullopt;
1505
1506 if (hasSingleValue(*LocalResult))
1507 // Can't get any more precise here
1508 return LocalResult;
1509
1510 std::optional<ValueLatticeElement> OptInBlock =
1511 getBlockValue(Val, BBFrom, BBFrom->getTerminator());
1512 if (!OptInBlock)
1513 return std::nullopt;
1514 ValueLatticeElement &InBlock = *OptInBlock;
1515
1516 // We can use the context instruction (generically the ultimate instruction
1517 // the calling pass is trying to simplify) here, even though the result of
1518 // this function is generally cached when called from the solve* functions
1519 // (and that cached result might be used with queries using a different
1520 // context instruction), because when this function is called from the solve*
1521 // functions, the context instruction is not provided. When called from
1522 // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1523 // but then the result is not cached.
1524 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);
1525
1526 return intersect(*LocalResult, InBlock);
1527 }
1528
getValueInBlock(Value * V,BasicBlock * BB,Instruction * CxtI)1529 ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1530 Instruction *CxtI) {
1531 LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1532 << BB->getName() << "'\n");
1533
1534 assert(BlockValueStack.empty() && BlockValueSet.empty());
1535 std::optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
1536 if (!OptResult) {
1537 solve();
1538 OptResult = getBlockValue(V, BB, CxtI);
1539 assert(OptResult && "Value not available after solving");
1540 }
1541
1542 ValueLatticeElement Result = *OptResult;
1543 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1544 return Result;
1545 }
1546
getValueAt(Value * V,Instruction * CxtI)1547 ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1548 LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1549 << "'\n");
1550
1551 if (auto *C = dyn_cast<Constant>(V))
1552 return ValueLatticeElement::get(C);
1553
1554 ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
1555 if (auto *I = dyn_cast<Instruction>(V))
1556 Result = getFromRangeMetadata(I);
1557 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1558
1559 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1560 return Result;
1561 }
1562
1563 ValueLatticeElement LazyValueInfoImpl::
getValueOnEdge(Value * V,BasicBlock * FromBB,BasicBlock * ToBB,Instruction * CxtI)1564 getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1565 Instruction *CxtI) {
1566 LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1567 << FromBB->getName() << "' to '" << ToBB->getName()
1568 << "'\n");
1569
1570 std::optional<ValueLatticeElement> Result =
1571 getEdgeValue(V, FromBB, ToBB, CxtI);
1572 while (!Result) {
1573 // As the worklist only explicitly tracks block values (but not edge values)
1574 // we may have to call solve() multiple times, as the edge value calculation
1575 // may request additional block values.
1576 solve();
1577 Result = getEdgeValue(V, FromBB, ToBB, CxtI);
1578 }
1579
1580 LLVM_DEBUG(dbgs() << " Result = " << *Result << "\n");
1581 return *Result;
1582 }
1583
/// Compute the lattice value for the value used by \p U, additionally
/// narrowed by select/branch conditions that are known to hold at the
/// (transitive) position of the use.
ValueLatticeElement LazyValueInfoImpl::getValueAtUse(const Use &U) {
  Value *V = U.get();
  // Start from the plain block value at the user's location.
  auto *CxtI = cast<Instruction>(U.getUser());
  ValueLatticeElement VL = getValueInBlock(V, CxtI->getParent(), CxtI);

  // Check whether the only (possibly transitive) use of the value is in a
  // position where V can be constrained by a select or branch condition.
  const Use *CurrU = &U;
  // TODO: Increase limit?
  const unsigned MaxUsesToInspect = 3;
  for (unsigned I = 0; I < MaxUsesToInspect; ++I) {
    std::optional<ValueLatticeElement> CondVal;
    auto *CurrI = cast<Instruction>(CurrU->getUser());
    if (auto *SI = dyn_cast<SelectInst>(CurrI)) {
      // If the value is undef, a different value may be chosen in
      // the select condition and at use.
      if (!isGuaranteedNotToBeUndef(SI->getCondition(), AC))
        break;
      // Operand 1 is the true arm, operand 2 the false arm; the condition's
      // truth value at this use constrains V accordingly.
      if (CurrU->getOperandNo() == 1)
        CondVal =
            *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ true,
                                   /*UseBlockValue*/ false);
      else if (CurrU->getOperandNo() == 2)
        CondVal =
            *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ false,
                                   /*UseBlockValue*/ false);
    } else if (auto *PHI = dyn_cast<PHINode>(CurrI)) {
      // TODO: Use non-local query?
      // A phi use only sees V along its incoming edge; fold in the edge
      // condition for that predecessor.
      CondVal = *getEdgeValueLocal(V, PHI->getIncomingBlock(*CurrU),
                                   PHI->getParent(), /*UseBlockValue*/ false);
    }
    if (CondVal)
      VL = intersect(VL, *CondVal);

    // Only follow one-use chain, to allow direct intersection of conditions.
    // If there are multiple uses, we would have to intersect with the union of
    // all conditions at different uses.
    // Stop walking if we hit a non-speculatable instruction. Even if the
    // result is only used under a specific condition, executing the
    // instruction itself may cause side effects or UB already.
    // This also disallows looking through phi nodes: If the phi node is part
    // of a cycle, we might end up reasoning about values from different cycle
    // iterations (PR60629).
    if (!CurrI->hasOneUse() || !isSafeToSpeculativelyExecute(CurrI))
      break;
    CurrU = &*CurrI->use_begin();
  }
  return VL;
}
1633
/// Inform the cache that the edge PredBB->OldSucc has been threaded to
/// NewSucc; simply forwards to the cache implementation.
void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                                   BasicBlock *NewSucc) {
  TheCache.threadEdgeImpl(OldSucc, NewSucc);
}
1638
1639 //===----------------------------------------------------------------------===//
1640 // LazyValueInfo Impl
1641 //===----------------------------------------------------------------------===//
1642
/// Legacy pass entry point: wires up the assumption cache and drops any
/// stale results from a previously analyzed function. Never modifies IR.
bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
  Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

  // Clear cached values computed for the previous function, if any.
  if (auto *Impl = Info.getImpl())
    Impl->clear();

  // Fully lazy.
  return false;
}
1652
/// Declare pass dependencies: LVI needs assumption-cache and TLI results,
/// and preserves all analyses (it is read-only).
void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}
1658
getLVI()1659 LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1660
1661 /// This lazily constructs the LazyValueInfoImpl.
getOrCreateImpl(const Module * M)1662 LazyValueInfoImpl &LazyValueInfo::getOrCreateImpl(const Module *M) {
1663 if (!PImpl) {
1664 assert(M && "getCache() called with a null Module");
1665 const DataLayout &DL = M->getDataLayout();
1666 Function *GuardDecl =
1667 M->getFunction(Intrinsic::getName(Intrinsic::experimental_guard));
1668 PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
1669 }
1670 return *static_cast<LazyValueInfoImpl *>(PImpl);
1671 }
1672
getImpl()1673 LazyValueInfoImpl *LazyValueInfo::getImpl() {
1674 if (!PImpl)
1675 return nullptr;
1676 return static_cast<LazyValueInfoImpl *>(PImpl);
1677 }
1678
~LazyValueInfo()1679 LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1680
releaseMemory()1681 void LazyValueInfo::releaseMemory() {
1682 // If the cache was allocated, free it.
1683 if (auto *Impl = getImpl()) {
1684 delete &*Impl;
1685 PImpl = nullptr;
1686 }
1687 }
1688
invalidate(Function & F,const PreservedAnalyses & PA,FunctionAnalysisManager::Invalidator & Inv)1689 bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1690 FunctionAnalysisManager::Invalidator &Inv) {
1691 // We need to invalidate if we have either failed to preserve this analyses
1692 // result directly or if any of its dependencies have been invalidated.
1693 auto PAC = PA.getChecker<LazyValueAnalysis>();
1694 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
1695 return true;
1696
1697 return false;
1698 }
1699
releaseMemory()1700 void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1701
run(Function & F,FunctionAnalysisManager & FAM)1702 LazyValueInfo LazyValueAnalysis::run(Function &F,
1703 FunctionAnalysisManager &FAM) {
1704 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1705
1706 return LazyValueInfo(&AC, &F.getDataLayout());
1707 }
1708
1709 /// Returns true if we can statically tell that this value will never be a
1710 /// "useful" constant. In practice, this means we've got something like an
1711 /// alloca or a malloc call for which a comparison against a constant can
1712 /// only be guarding dead code. Note that we are potentially giving up some
1713 /// precision in dead code (a constant result) in favour of avoiding a
1714 /// expensive search for a easily answered common query.
isKnownNonConstant(Value * V)1715 static bool isKnownNonConstant(Value *V) {
1716 V = V->stripPointerCasts();
1717 // The return val of alloc cannot be a Constant.
1718 if (isa<AllocaInst>(V))
1719 return true;
1720 return false;
1721 }
1722
getConstant(Value * V,Instruction * CxtI)1723 Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
1724 // Bail out early if V is known not to be a Constant.
1725 if (isKnownNonConstant(V))
1726 return nullptr;
1727
1728 BasicBlock *BB = CxtI->getParent();
1729 ValueLatticeElement Result =
1730 getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1731
1732 if (Result.isConstant())
1733 return Result.getConstant();
1734 if (Result.isConstantRange()) {
1735 const ConstantRange &CR = Result.getConstantRange();
1736 if (const APInt *SingleVal = CR.getSingleElement())
1737 return ConstantInt::get(V->getType(), *SingleVal);
1738 }
1739 return nullptr;
1740 }
1741
getConstantRange(Value * V,Instruction * CxtI,bool UndefAllowed)1742 ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
1743 bool UndefAllowed) {
1744 BasicBlock *BB = CxtI->getParent();
1745 ValueLatticeElement Result =
1746 getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1747 return Result.asConstantRange(V->getType(), UndefAllowed);
1748 }
1749
getConstantRangeAtUse(const Use & U,bool UndefAllowed)1750 ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
1751 bool UndefAllowed) {
1752 auto *Inst = cast<Instruction>(U.getUser());
1753 ValueLatticeElement Result =
1754 getOrCreateImpl(Inst->getModule()).getValueAtUse(U);
1755 return Result.asConstantRange(U->getType(), UndefAllowed);
1756 }
1757
1758 /// Determine whether the specified value is known to be a
1759 /// constant on the specified edge. Return null if not.
getConstantOnEdge(Value * V,BasicBlock * FromBB,BasicBlock * ToBB,Instruction * CxtI)1760 Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1761 BasicBlock *ToBB,
1762 Instruction *CxtI) {
1763 Module *M = FromBB->getModule();
1764 ValueLatticeElement Result =
1765 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1766
1767 if (Result.isConstant())
1768 return Result.getConstant();
1769 if (Result.isConstantRange()) {
1770 const ConstantRange &CR = Result.getConstantRange();
1771 if (const APInt *SingleVal = CR.getSingleElement())
1772 return ConstantInt::get(V->getType(), *SingleVal);
1773 }
1774 return nullptr;
1775 }
1776
getConstantRangeOnEdge(Value * V,BasicBlock * FromBB,BasicBlock * ToBB,Instruction * CxtI)1777 ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
1778 BasicBlock *FromBB,
1779 BasicBlock *ToBB,
1780 Instruction *CxtI) {
1781 Module *M = FromBB->getModule();
1782 ValueLatticeElement Result =
1783 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1784 // TODO: Should undef be allowed here?
1785 return Result.asConstantRange(V->getType(), /*UndefAllowed*/ true);
1786 }
1787
getPredicateResult(CmpInst::Predicate Pred,Constant * C,const ValueLatticeElement & Val,const DataLayout & DL)1788 static Constant *getPredicateResult(CmpInst::Predicate Pred, Constant *C,
1789 const ValueLatticeElement &Val,
1790 const DataLayout &DL) {
1791 // If we know the value is a constant, evaluate the conditional.
1792 if (Val.isConstant())
1793 return ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL);
1794
1795 Type *ResTy = CmpInst::makeCmpResultType(C->getType());
1796 if (Val.isConstantRange()) {
1797 const ConstantRange &CR = Val.getConstantRange();
1798 ConstantRange RHS = C->toConstantRange();
1799 if (CR.icmp(Pred, RHS))
1800 return ConstantInt::getTrue(ResTy);
1801 if (CR.icmp(CmpInst::getInversePredicate(Pred), RHS))
1802 return ConstantInt::getFalse(ResTy);
1803 return nullptr;
1804 }
1805
1806 if (Val.isNotConstant()) {
1807 // If this is an equality comparison, we can try to fold it knowing that
1808 // "V != C1".
1809 if (Pred == ICmpInst::ICMP_EQ) {
1810 // !C1 == C -> false iff C1 == C.
1811 Constant *Res = ConstantFoldCompareInstOperands(
1812 ICmpInst::ICMP_NE, Val.getNotConstant(), C, DL);
1813 if (Res && Res->isNullValue())
1814 return ConstantInt::getFalse(ResTy);
1815 } else if (Pred == ICmpInst::ICMP_NE) {
1816 // !C1 != C -> true iff C1 == C.
1817 Constant *Res = ConstantFoldCompareInstOperands(
1818 ICmpInst::ICMP_NE, Val.getNotConstant(), C, DL);
1819 if (Res && Res->isNullValue())
1820 return ConstantInt::getTrue(ResTy);
1821 }
1822 return nullptr;
1823 }
1824
1825 return nullptr;
1826 }
1827
1828 /// Determine whether the specified value comparison with a constant is known to
1829 /// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
getPredicateOnEdge(CmpInst::Predicate Pred,Value * V,Constant * C,BasicBlock * FromBB,BasicBlock * ToBB,Instruction * CxtI)1830 Constant *LazyValueInfo::getPredicateOnEdge(CmpInst::Predicate Pred, Value *V,
1831 Constant *C, BasicBlock *FromBB,
1832 BasicBlock *ToBB,
1833 Instruction *CxtI) {
1834 Module *M = FromBB->getModule();
1835 ValueLatticeElement Result =
1836 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1837
1838 return getPredicateResult(Pred, C, Result, M->getDataLayout());
1839 }
1840
/// Determine whether "V Pred C" is known at \p CxtI. Returns an i1 constant
/// if the predicate folds to true/false, or null if unknown. When the merged
/// lattice value is inconclusive, the predicate is additionally re-checked
/// along each incoming edge of CxtI's block.
Constant *LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred, Value *V,
                                        Constant *C, Instruction *CxtI,
                                        bool UseBlockValue) {
  // Is or is not NonNull are common predicates being queried. If
  // isKnownNonZero can tell us the result of the predicate, we can
  // return it quickly. But this is only a fastpath, and falling
  // through would still be correct.
  Module *M = CxtI->getModule();
  const DataLayout &DL = M->getDataLayout();
  if (V->getType()->isPointerTy() && C->isNullValue() &&
      isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
    Type *ResTy = CmpInst::makeCmpResultType(C->getType());
    if (Pred == ICmpInst::ICMP_EQ)
      return ConstantInt::getFalse(ResTy);
    else if (Pred == ICmpInst::ICMP_NE)
      return ConstantInt::getTrue(ResTy);
  }

  // Query the lattice value of V (block-scope or point-scope, per the flag)
  // and try to fold the comparison directly.
  auto &Impl = getOrCreateImpl(M);
  ValueLatticeElement Result =
      UseBlockValue ? Impl.getValueInBlock(V, CxtI->getParent(), CxtI)
                    : Impl.getValueAt(V, CxtI);
  Constant *Ret = getPredicateResult(Pred, C, Result, DL);
  if (Ret)
    return Ret;

  // Note: The following bit of code is somewhat distinct from the rest of LVI;
  // LVI as a whole tries to compute a lattice value which is conservatively
  // correct at a given location. In this case, we have a predicate which we
  // weren't able to prove about the merged result, and we're pushing that
  // predicate back along each incoming edge to see if we can prove it
  // separately for each input. As a motivating example, consider:
  // bb1:
  //   %v1 = ... ; constantrange<1, 5>
  //   br label %merge
  // bb2:
  //   %v2 = ... ; constantrange<10, 20>
  //   br label %merge
  // merge:
  //   %phi = phi [%v1, %v2] ; constantrange<1,20>
  //   %pred = icmp eq i32 %phi, 8
  // We can't tell from the lattice value for '%phi' that '%pred' is false
  // along each path, but by checking the predicate over each input separately,
  // we can.
  // We limit the search to one step backwards from the current BB and value.
  // We could consider extending this to search further backwards through the
  // CFG and/or value graph, but there are non-obvious compile time vs quality
  // tradeoffs.
  BasicBlock *BB = CxtI->getParent();

  // Function entry or an unreachable block.  Bail to avoid confusing
  // analysis below.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE)
    return nullptr;

  // If V is a PHI node in the same block as the context, we need to ask
  // questions about the predicate as applied to the incoming value along
  // each edge. This is useful for eliminating cases where the predicate is
  // known along all incoming edges.
  if (auto *PHI = dyn_cast<PHINode>(V))
    if (PHI->getParent() == BB) {
      Constant *Baseline = nullptr;
      for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
        Value *Incoming = PHI->getIncomingValue(i);
        BasicBlock *PredBB = PHI->getIncomingBlock(i);
        // Note that PredBB may be BB itself.
        Constant *Result =
            getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, CxtI);

        // Keep going as long as we've seen a consistent known result for
        // all inputs.
        Baseline = (i == 0) ? Result /* First iteration */
                            : (Baseline == Result ? Baseline
                                                  : nullptr); /* All others */
        if (!Baseline)
          break;
      }
      if (Baseline)
        return Baseline;
    }

  // For a comparison where the V is outside this block, it's possible
  // that we've branched on it before. Look to see if the value is known
  // on all incoming edges.
  if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
    // For predecessor edge, determine if the comparison is true or false
    // on that edge. If they're all true or all false, we can conclude
    // the value of the comparison in this block.
    Constant *Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
    if (Baseline) {
      // Check that all remaining incoming values match the first one.
      while (++PI != PE) {
        Constant *Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
        if (Ret != Baseline)
          break;
      }
      // If we terminated early, then one of the values didn't match.
      if (PI == PE) {
        return Baseline;
      }
    }
  }

  return nullptr;
}
1947
getPredicateAt(CmpInst::Predicate Pred,Value * LHS,Value * RHS,Instruction * CxtI,bool UseBlockValue)1948 Constant *LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred, Value *LHS,
1949 Value *RHS, Instruction *CxtI,
1950 bool UseBlockValue) {
1951 if (auto *C = dyn_cast<Constant>(RHS))
1952 return getPredicateAt(Pred, LHS, C, CxtI, UseBlockValue);
1953 if (auto *C = dyn_cast<Constant>(LHS))
1954 return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
1955 UseBlockValue);
1956
1957 // Got two non-Constant values. Try to determine the comparison results based
1958 // on the block values of the two operands, e.g. because they have
1959 // non-overlapping ranges.
1960 if (UseBlockValue) {
1961 Module *M = CxtI->getModule();
1962 ValueLatticeElement L =
1963 getOrCreateImpl(M).getValueInBlock(LHS, CxtI->getParent(), CxtI);
1964 if (L.isOverdefined())
1965 return nullptr;
1966
1967 ValueLatticeElement R =
1968 getOrCreateImpl(M).getValueInBlock(RHS, CxtI->getParent(), CxtI);
1969 Type *Ty = CmpInst::makeCmpResultType(LHS->getType());
1970 return L.getCompare(Pred, Ty, R, M->getDataLayout());
1971 }
1972 return nullptr;
1973 }
1974
/// Notify LVI that the edge PredBB->OldSucc was threaded to NewSucc.
/// No-op if the implementation was never constructed.
void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                               BasicBlock *NewSucc) {
  if (auto *Impl = getImpl())
    Impl->threadEdge(PredBB, OldSucc, NewSucc);
}
1980
/// Drop any cached information about V. No-op if no impl exists yet.
void LazyValueInfo::forgetValue(Value *V) {
  if (auto *Impl = getImpl())
    Impl->forgetValue(V);
}
1985
/// Drop any cached information associated with BB. No-op if no impl exists.
void LazyValueInfo::eraseBlock(BasicBlock *BB) {
  if (auto *Impl = getImpl())
    Impl->eraseBlock(BB);
}
1990
/// Clear the entire cache (but keep the impl allocated for reuse).
void LazyValueInfo::clear() {
  if (auto *Impl = getImpl())
    Impl->clear();
}
1995
/// Print cached lattice values for F to OS. No-op if no impl exists yet.
void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
  if (auto *Impl = getImpl())
    Impl->printLVI(F, DTree, OS);
}
2000
2001 // Print the LVI for the function arguments at the start of each basic block.
emitBasicBlockStartAnnot(const BasicBlock * BB,formatted_raw_ostream & OS)2002 void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
2003 const BasicBlock *BB, formatted_raw_ostream &OS) {
2004 // Find if there are latticevalues defined for arguments of the function.
2005 auto *F = BB->getParent();
2006 for (const auto &Arg : F->args()) {
2007 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2008 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
2009 if (Result.isUnknown())
2010 continue;
2011 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
2012 }
2013 }
2014
2015 // This function prints the LVI analysis for the instruction I at the beginning
2016 // of various basic blocks. It relies on calculated values that are stored in
2017 // the LazyValueInfoCache, and in the absence of cached values, recalculate the
2018 // LazyValueInfo for `I`, and print that info.
emitInstructionAnnot(const Instruction * I,formatted_raw_ostream & OS)2019 void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
2020 const Instruction *I, formatted_raw_ostream &OS) {
2021
2022 auto *ParentBB = I->getParent();
2023 SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
2024 // We can generate (solve) LVI values only for blocks that are dominated by
2025 // the I's parent. However, to avoid generating LVI for all dominating blocks,
2026 // that contain redundant/uninteresting information, we print LVI for
2027 // blocks that may use this LVI information (such as immediate successor
2028 // blocks, and blocks that contain uses of `I`).
2029 auto printResult = [&](const BasicBlock *BB) {
2030 if (!BlocksContainingLVI.insert(BB).second)
2031 return;
2032 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2033 const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
2034 OS << "; LatticeVal for: '" << *I << "' in BB: '";
2035 BB->printAsOperand(OS, false);
2036 OS << "' is: " << Result << "\n";
2037 };
2038
2039 printResult(ParentBB);
2040 // Print the LVI analysis results for the immediate successor blocks, that
2041 // are dominated by `ParentBB`.
2042 for (const auto *BBSucc : successors(ParentBB))
2043 if (DT.dominates(ParentBB, BBSucc))
2044 printResult(BBSucc);
2045
2046 // Print LVI in blocks where `I` is used.
2047 for (const auto *U : I->users())
2048 if (auto *UseI = dyn_cast<Instruction>(U))
2049 if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
2050 printResult(UseI->getParent());
2051
2052 }
2053
run(Function & F,FunctionAnalysisManager & AM)2054 PreservedAnalyses LazyValueInfoPrinterPass::run(Function &F,
2055 FunctionAnalysisManager &AM) {
2056 OS << "LVI for function '" << F.getName() << "':\n";
2057 auto &LVI = AM.getResult<LazyValueAnalysis>(F);
2058 auto &DTree = AM.getResult<DominatorTreeAnalysis>(F);
2059 LVI.printLVI(F, DTree, OS);
2060 return PreservedAnalyses::all();
2061 }
2062