1 //===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file defines the interface for lazy computation of value constraint 10 // information. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Analysis/LazyValueInfo.h" 15 #include "llvm/ADT/DenseSet.h" 16 #include "llvm/ADT/Optional.h" 17 #include "llvm/ADT/STLExtras.h" 18 #include "llvm/Analysis/AssumptionCache.h" 19 #include "llvm/Analysis/ConstantFolding.h" 20 #include "llvm/Analysis/InstructionSimplify.h" 21 #include "llvm/Analysis/TargetLibraryInfo.h" 22 #include "llvm/Analysis/ValueLattice.h" 23 #include "llvm/Analysis/ValueTracking.h" 24 #include "llvm/IR/AssemblyAnnotationWriter.h" 25 #include "llvm/IR/CFG.h" 26 #include "llvm/IR/ConstantRange.h" 27 #include "llvm/IR/Constants.h" 28 #include "llvm/IR/DataLayout.h" 29 #include "llvm/IR/Dominators.h" 30 #include "llvm/IR/Instructions.h" 31 #include "llvm/IR/IntrinsicInst.h" 32 #include "llvm/IR/Intrinsics.h" 33 #include "llvm/IR/LLVMContext.h" 34 #include "llvm/IR/PatternMatch.h" 35 #include "llvm/IR/ValueHandle.h" 36 #include "llvm/InitializePasses.h" 37 #include "llvm/Support/Debug.h" 38 #include "llvm/Support/FormattedStream.h" 39 #include "llvm/Support/KnownBits.h" 40 #include "llvm/Support/raw_ostream.h" 41 #include <map> 42 using namespace llvm; 43 using namespace PatternMatch; 44 45 #define DEBUG_TYPE "lazy-value-info" 46 47 // This is the number of worklist items we will process to try to discover an 48 // answer for a given value. 
// This is the number of worklist items we will process to try to discover an
// answer for a given value. Past this limit, solve() gives up and marks the
// requested values overdefined rather than burning more compile time.
static const unsigned MaxProcessedPerValue = 500;

char LazyValueInfoWrapperPass::ID = 0;
LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) {
  initializeLazyValueInfoWrapperPassPass(*PassRegistry::getPassRegistry());
}
INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
                "Lazy Value Information Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
                "Lazy Value Information Analysis", false, true)

namespace llvm {
FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); }
}

AnalysisKey LazyValueAnalysis::Key;

/// Returns true if this lattice value represents at most one possible value.
/// This is as precise as any lattice value can get while still representing
/// reachable code.
static bool hasSingleValue(const ValueLatticeElement &Val) {
  if (Val.isConstantRange() &&
      Val.getConstantRange().isSingleElement())
    // Integer constants are single element ranges
    return true;
  if (Val.isConstant())
    // Non integer constants
    return true;
  return false;
}

/// Combine two sets of facts about the same value into a single set of
/// facts.  Note that this method is not suitable for merging facts along
/// different paths in a CFG; that's what the mergeIn function is for.  This
/// is for merging facts gathered about the same value at the same location
/// through two independent means.
/// Notes:
/// * This method does not promise to return the most precise possible lattice
///   value implied by A and B.  It is allowed to return any lattice element
///   which is at least as strong as *either* A or B (unless our facts
///   conflict, see below).
/// * Due to unreachable code, the intersection of two lattice values could be
///   contradictory.  If this happens, we return some valid lattice value so as
///   not to confuse the rest of LVI.  Ideally, we'd always return Undefined,
///   but we do not make this guarantee.  TODO: This would be a useful
///   enhancement.
static ValueLatticeElement intersect(const ValueLatticeElement &A,
                                     const ValueLatticeElement &B) {
  // Undefined is the strongest state.  It means the value is known to be along
  // an unreachable path.
  if (A.isUnknown())
    return A;
  if (B.isUnknown())
    return B;

  // If we gave up for one, but got a usable fact from the other, use it.
  if (A.isOverdefined())
    return B;
  if (B.isOverdefined())
    return A;

  // Can't get any more precise than constants.
  if (hasSingleValue(A))
    return A;
  if (hasSingleValue(B))
    return B;

  // Could be either constant range or not constant here.
  if (!A.isConstantRange() || !B.isConstantRange()) {
    // TODO: Arbitrary choice, could be improved
    return A;
  }

  // Intersect two constant ranges
  ConstantRange Range =
      A.getConstantRange().intersectWith(B.getConstantRange());
  // Note: An empty range is implicitly converted to unknown or undef depending
  // on MayIncludeUndef internally.
  return ValueLatticeElement::getRange(
      std::move(Range), /*MayIncludeUndef=*/A.isConstantRangeIncludingUndef() ||
                            B.isConstantRangeIncludingUndef());
}

//===----------------------------------------------------------------------===//
//                          LazyValueInfoCache Decl
//===----------------------------------------------------------------------===//

namespace {
/// A callback value handle updates the cache when values are erased.
class LazyValueInfoCache;
struct LVIValueHandle final : public CallbackVH {
  /// The cache to notify when the tracked value dies or is RAUW'd; may be
  /// null for handles used purely as lookup keys.
  LazyValueInfoCache *Parent;

  LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
      : CallbackVH(V), Parent(P) { }

  void deleted() override;
  // Cached facts about the old value do not transfer to its replacement, so
  // treat replacement the same as deletion and drop them.
  void allUsesReplacedWith(Value *V) override {
    deleted();
  }
};
} // end anonymous namespace

namespace {
using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;

/// This is the cache kept by LazyValueInfo which
/// maintains information about queries across the clients' queries.
class LazyValueInfoCache {
  /// This is all of the cached information for one basic block.  It contains
  /// the per-value lattice elements, as well as a separate set for
  /// overdefined values to reduce memory usage.  Additionally pointers
  /// dereferenced in the block are cached for nullability queries.
  struct BlockCacheEntry {
    SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
    SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
    // None indicates that the nonnull pointers for this basic block
    // have not been computed yet.
    Optional<NonNullPointerSet> NonNullPointers;
  };

  /// Cached information per basic block.
  DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
      BlockCache;
  /// Set of value handles used to erase values from the cache on deletion.
  DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;

  /// Look up the cache entry for \p BB, or null if none has been created.
  const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
    auto It = BlockCache.find_as(BB);
    if (It == BlockCache.end())
      return nullptr;
    return It->second.get();
  }

  /// Look up the cache entry for \p BB, creating an empty one on first use.
  BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
    auto It = BlockCache.find_as(BB);
    if (It == BlockCache.end())
      It = BlockCache.insert({ BB, std::make_unique<BlockCacheEntry>() })
               .first;

    return It->second.get();
  }

  /// Register a callback handle for \p Val (once) so cached facts about it
  /// are invalidated when it is deleted or RAUW'd.
  void addValueHandle(Value *Val) {
    auto HandleIt = ValueHandles.find_as(Val);
    if (HandleIt == ValueHandles.end())
      ValueHandles.insert({ Val, this });
  }

public:
  /// Record the lattice value computed for \p Val at the end of \p BB.
  void insertResult(Value *Val, BasicBlock *BB,
                    const ValueLatticeElement &Result) {
    BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);

    // Insert over-defined values into their own cache to reduce memory
    // overhead.
    if (Result.isOverdefined())
      Entry->OverDefined.insert(Val);
    else
      Entry->LatticeElements.insert({ Val, Result });

    addValueHandle(Val);
  }

  /// Return the cached lattice value for \p V in \p BB, or None if no result
  /// has been computed yet.
  Optional<ValueLatticeElement> getCachedValueInfo(Value *V,
                                                   BasicBlock *BB) const {
    const BlockCacheEntry *Entry = getBlockEntry(BB);
    if (!Entry)
      return None;

    if (Entry->OverDefined.count(V))
      return ValueLatticeElement::getOverdefined();

    auto LatticeIt = Entry->LatticeElements.find_as(V);
    if (LatticeIt == Entry->LatticeElements.end())
      return None;

    return LatticeIt->second;
  }

  /// Return true if \p V is recorded as non-null at the end of \p BB.
  /// The per-block non-null set is computed lazily via \p InitFn on first
  /// query and cached afterwards.
  bool isNonNullAtEndOfBlock(
      Value *V, BasicBlock *BB,
      function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
    BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
    if (!Entry->NonNullPointers) {
      Entry->NonNullPointers = InitFn(BB);
      for (Value *V : *Entry->NonNullPointers)
        addValueHandle(V);
    }

    return Entry->NonNullPointers->count(V);
  }

  /// clear - Empty the cache.
  void clear() {
    BlockCache.clear();
    ValueHandles.clear();
  }

  /// Inform the cache that a given value has been deleted.
  void eraseValue(Value *V);

  /// This is part of the update interface to inform the cache
  /// that a block has been deleted.
  void eraseBlock(BasicBlock *BB);

  /// Updates the cache to remove any influence an overdefined value in
  /// OldSucc might have (unless also overdefined in NewSucc).  This just
  /// flushes elements from the cache and does not add any.
  void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc);
};
}

void LazyValueInfoCache::eraseValue(Value *V) {
  // Drop every cached fact about V in every block; the value is (or is about
  // to be) gone, so any AssertingVH left pointing at it would fire.
  for (auto &Pair : BlockCache) {
    Pair.second->LatticeElements.erase(V);
    Pair.second->OverDefined.erase(V);
    if (Pair.second->NonNullPointers)
      Pair.second->NonNullPointers->erase(V);
  }

  auto HandleIt = ValueHandles.find_as(V);
  if (HandleIt != ValueHandles.end())
    ValueHandles.erase(HandleIt);
}

void LVIValueHandle::deleted() {
  // This erasure deallocates *this, so it MUST happen after we're done
  // using any and all members of *this.
  Parent->eraseValue(*this);
}

void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
  BlockCache.erase(BB);
}

void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
                                        BasicBlock *NewSucc) {
  // When an edge in the graph has been threaded, values that we could not
  // determine a value for before (i.e. were marked overdefined) may be
  // possible to solve now.  We do NOT try to proactively update these values.
  // Instead, we clear their entries from the cache, and allow lazy updating to
  // recompute them when needed.
293 294 // The updating process is fairly simple: we need to drop cached info 295 // for all values that were marked overdefined in OldSucc, and for those same 296 // values in any successor of OldSucc (except NewSucc) in which they were 297 // also marked overdefined. 298 std::vector<BasicBlock*> worklist; 299 worklist.push_back(OldSucc); 300 301 const BlockCacheEntry *Entry = getBlockEntry(OldSucc); 302 if (!Entry || Entry->OverDefined.empty()) 303 return; // Nothing to process here. 304 SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(), 305 Entry->OverDefined.end()); 306 307 // Use a worklist to perform a depth-first search of OldSucc's successors. 308 // NOTE: We do not need a visited list since any blocks we have already 309 // visited will have had their overdefined markers cleared already, and we 310 // thus won't loop to their successors. 311 while (!worklist.empty()) { 312 BasicBlock *ToUpdate = worklist.back(); 313 worklist.pop_back(); 314 315 // Skip blocks only accessible through NewSucc. 316 if (ToUpdate == NewSucc) continue; 317 318 // If a value was marked overdefined in OldSucc, and is here too... 319 auto OI = BlockCache.find_as(ToUpdate); 320 if (OI == BlockCache.end() || OI->second->OverDefined.empty()) 321 continue; 322 auto &ValueSet = OI->second->OverDefined; 323 324 bool changed = false; 325 for (Value *V : ValsToClear) { 326 if (!ValueSet.erase(V)) 327 continue; 328 329 // If we removed anything, then we potentially need to update 330 // blocks successors too. 331 changed = true; 332 } 333 334 if (!changed) continue; 335 336 llvm::append_range(worklist, successors(ToUpdate)); 337 } 338 } 339 340 341 namespace { 342 /// An assembly annotator class to print LazyValueCache information in 343 /// comments. 
class LazyValueInfoImpl;
class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
  LazyValueInfoImpl *LVIImpl;
  // While analyzing which blocks we can solve values for, we need the dominator
  // information.
  DominatorTree &DT;

public:
  LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
      : LVIImpl(L), DT(DTree) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override;

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override;
};
}
namespace {
// The actual implementation of the lazy analysis and update.  Note that the
// inheritance from LazyValueInfoCache is intended to be temporary while
// splitting the code and then transitioning to a has-a relationship.
class LazyValueInfoImpl {

  /// Cached results from previous queries
  LazyValueInfoCache TheCache;

  /// This stack holds the state of the value solver during a query.
  /// It basically emulates the callstack of the naive
  /// recursive value lookup process.
  SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;

  /// Keeps track of which block-value pairs are in BlockValueStack.
  /// Used to detect cycles and avoid duplicate work items.
  DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;

  /// Push BV onto BlockValueStack unless it's already in there.
  /// Returns true on success.
  bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
    if (!BlockValueSet.insert(BV).second)
      return false;  // It's already in the stack.

    LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
                      << BV.first->getName() << "\n");
    BlockValueStack.push_back(BV);
    return true;
  }

  AssumptionCache *AC;  ///< A pointer to the cache of @llvm.assume calls.
  const DataLayout &DL; ///< A mandatory DataLayout

  /// Declaration of the llvm.experimental.guard() intrinsic,
  /// if it exists in the module.
  Function *GuardDecl;

  Optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
                                              Instruction *CxtI);
  Optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
                                             BasicBlock *T,
                                             Instruction *CxtI = nullptr);

  // These methods process one work item and may add more. A false value
  // returned means that the work item was not completely processed and must
  // be revisited after going through the new items.
  bool solveBlockValue(Value *Val, BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValueImpl(Value *Val, BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
                                                        BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
                                                       BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
                                                      BasicBlock *BB);
  Optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
                                      BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
      Instruction *I, BasicBlock *BB,
      std::function<ConstantRange(const ConstantRange &,
                                  const ConstantRange &)> OpFn);
  Optional<ValueLatticeElement> solveBlockValueBinaryOp(BinaryOperator *BBI,
                                                        BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
                                                    BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValueOverflowIntrinsic(
      WithOverflowInst *WO, BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
                                                         BasicBlock *BB);
  Optional<ValueLatticeElement> solveBlockValueExtractValue(
      ExtractValueInst *EVI, BasicBlock *BB);
  bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
  void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
                                                     ValueLatticeElement &BBLV,
                                                     Instruction *BBI);

  void solve();

public:
  /// This is the query interface to determine the lattice value for the
  /// specified Value* at the context instruction (if specified) or at the
  /// start of the block.
  ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
                                      Instruction *CxtI = nullptr);

  /// This is the query interface to determine the lattice value for the
  /// specified Value* at the specified instruction using only information
  /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
  /// recursive query is performed.
  ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);

  /// This is the query interface to determine the lattice
  /// value for the specified Value* that is true on the specified edge.
  ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
                                     BasicBlock *ToBB,
                                     Instruction *CxtI = nullptr);

  /// Complete flush all previously computed values
  void clear() {
    TheCache.clear();
  }

  /// Printing the LazyValueInfo Analysis.
  void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
    LazyValueInfoAnnotatedWriter Writer(this, DTree);
    F.print(OS, &Writer);
  }

  /// This is part of the update interface to inform the cache
  /// that a block has been deleted.
  void eraseBlock(BasicBlock *BB) {
    TheCache.eraseBlock(BB);
  }

  /// This is the update interface to inform the cache that an edge from
  /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
  void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);

  LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
                    Function *GuardDecl)
      : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
};
} // end anonymous namespace


void LazyValueInfoImpl::solve() {
  // Snapshot the work items present at entry; these are the queries the
  // caller actually asked for, and are what we mark overdefined on bail-out.
  SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
      BlockValueStack.begin(), BlockValueStack.end());

  unsigned processedCount = 0;
  while (!BlockValueStack.empty()) {
    processedCount++;
    // Abort if we have to process too many values to get a result for this one.
    // Because of the design of the overdefined cache currently being per-block
    // to avoid naming-related issues (IE it wants to try to give different
    // results for the same name in different blocks), overdefined results don't
    // get cached globally, which in turn means we will often try to rediscover
    // the same overdefined result again and again.  Once something like
    // PredicateInfo is used in LVI or CVP, we should be able to make the
    // overdefined cache global, and remove this throttle.
    if (processedCount > MaxProcessedPerValue) {
      LLVM_DEBUG(
          dbgs() << "Giving up on stack because we are getting too deep\n");
      // Fill in the original values
      while (!StartingStack.empty()) {
        std::pair<BasicBlock *, Value *> &e = StartingStack.back();
        TheCache.insertResult(e.second, e.first,
                              ValueLatticeElement::getOverdefined());
        StartingStack.pop_back();
      }
      BlockValueSet.clear();
      BlockValueStack.clear();
      return;
    }
    std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
    assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");

    if (solveBlockValue(e.second, e.first)) {
      // The work item was completely processed.
      assert(BlockValueStack.back() == e && "Nothing should have been pushed!");
#ifndef NDEBUG
      Optional<ValueLatticeElement> BBLV =
          TheCache.getCachedValueInfo(e.second, e.first);
      assert(BBLV && "Result should be in cache!");
      LLVM_DEBUG(
          dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
                 << *BBLV << "\n");
#endif

      BlockValueStack.pop_back();
      BlockValueSet.erase(e);
    } else {
      // More work needs to be done before revisiting.
      assert(BlockValueStack.back() != e && "Stack should have been pushed!");
    }
  }
}

Optional<ValueLatticeElement> LazyValueInfoImpl::getBlockValue(
    Value *Val, BasicBlock *BB, Instruction *CxtI) {
  // If already a constant, there is nothing to compute.
  if (Constant *VC = dyn_cast<Constant>(Val))
    return ValueLatticeElement::get(VC);

  // Cache hit: refine the cached result with any assume/guard facts valid at
  // the context instruction before handing it back.
  if (Optional<ValueLatticeElement> OptLatticeVal =
          TheCache.getCachedValueInfo(Val, BB)) {
    intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
    return OptLatticeVal;
  }

  // We have hit a cycle, assume overdefined.
  if (!pushBlockValue({ BB, Val }))
    return ValueLatticeElement::getOverdefined();

  // Yet to be resolved.
554 return None; 555 } 556 557 static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) { 558 switch (BBI->getOpcode()) { 559 default: break; 560 case Instruction::Load: 561 case Instruction::Call: 562 case Instruction::Invoke: 563 if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range)) 564 if (isa<IntegerType>(BBI->getType())) { 565 return ValueLatticeElement::getRange( 566 getConstantRangeFromMetadata(*Ranges)); 567 } 568 break; 569 }; 570 // Nothing known - will be intersected with other facts 571 return ValueLatticeElement::getOverdefined(); 572 } 573 574 bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) { 575 assert(!isa<Constant>(Val) && "Value should not be constant"); 576 assert(!TheCache.getCachedValueInfo(Val, BB) && 577 "Value should not be in cache"); 578 579 // Hold off inserting this value into the Cache in case we have to return 580 // false and come back later. 581 Optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB); 582 if (!Res) 583 // Work pushed, will revisit 584 return false; 585 586 TheCache.insertResult(Val, BB, *Res); 587 return true; 588 } 589 590 Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueImpl( 591 Value *Val, BasicBlock *BB) { 592 Instruction *BBI = dyn_cast<Instruction>(Val); 593 if (!BBI || BBI->getParent() != BB) 594 return solveBlockValueNonLocal(Val, BB); 595 596 if (PHINode *PN = dyn_cast<PHINode>(BBI)) 597 return solveBlockValuePHINode(PN, BB); 598 599 if (auto *SI = dyn_cast<SelectInst>(BBI)) 600 return solveBlockValueSelect(SI, BB); 601 602 // If this value is a nonnull pointer, record it's range and bailout. Note 603 // that for all other pointer typed values, we terminate the search at the 604 // definition. We could easily extend this to look through geps, bitcasts, 605 // and the like to prove non-nullness, but it's not clear that's worth it 606 // compile time wise. 
The context-insensitive value walk done inside 607 // isKnownNonZero gets most of the profitable cases at much less expense. 608 // This does mean that we have a sensitivity to where the defining 609 // instruction is placed, even if it could legally be hoisted much higher. 610 // That is unfortunate. 611 PointerType *PT = dyn_cast<PointerType>(BBI->getType()); 612 if (PT && isKnownNonZero(BBI, DL)) 613 return ValueLatticeElement::getNot(ConstantPointerNull::get(PT)); 614 615 if (BBI->getType()->isIntegerTy()) { 616 if (auto *CI = dyn_cast<CastInst>(BBI)) 617 return solveBlockValueCast(CI, BB); 618 619 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI)) 620 return solveBlockValueBinaryOp(BO, BB); 621 622 if (auto *EVI = dyn_cast<ExtractValueInst>(BBI)) 623 return solveBlockValueExtractValue(EVI, BB); 624 625 if (auto *II = dyn_cast<IntrinsicInst>(BBI)) 626 return solveBlockValueIntrinsic(II, BB); 627 } 628 629 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName() 630 << "' - unknown inst def found.\n"); 631 return getFromRangeMetadata(BBI); 632 } 633 634 static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet) { 635 // TODO: Use NullPointerIsDefined instead. 636 if (Ptr->getType()->getPointerAddressSpace() == 0) 637 PtrSet.insert(getUnderlyingObject(Ptr)); 638 } 639 640 static void AddNonNullPointersByInstruction( 641 Instruction *I, NonNullPointerSet &PtrSet) { 642 if (LoadInst *L = dyn_cast<LoadInst>(I)) { 643 AddNonNullPointer(L->getPointerOperand(), PtrSet); 644 } else if (StoreInst *S = dyn_cast<StoreInst>(I)) { 645 AddNonNullPointer(S->getPointerOperand(), PtrSet); 646 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) { 647 if (MI->isVolatile()) return; 648 649 // FIXME: check whether it has a valuerange that excludes zero? 
    // A zero-length mem intrinsic never dereferences its pointers, so it
    // proves nothing; only constant non-zero lengths are accepted.
    ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
    if (!Len || Len->isZero()) return;

    AddNonNullPointer(MI->getRawDest(), PtrSet);
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
      AddNonNullPointer(MTI->getRawSource(), PtrSet);
  }
}

bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
  // If null is a defined (dereferenceable) address in this function/address
  // space, dereferences prove nothing.
  if (NullPointerIsDefined(BB->getParent(),
                           Val->getType()->getPointerAddressSpace()))
    return false;

  Val = Val->stripInBoundsOffsets();
  return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
    NonNullPointerSet NonNullPointers;
    for (Instruction &I : *BB)
      AddNonNullPointersByInstruction(&I, NonNullPointers);
    return NonNullPointers;
  });
}

Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueNonLocal(
    Value *Val, BasicBlock *BB) {
  ValueLatticeElement Result;  // Start Undefined.

  // If this is the entry block, we must be asking about an argument.  The
  // value is overdefined.
  if (BB->isEntryBlock()) {
    assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
    return ValueLatticeElement::getOverdefined();
  }

  // Loop over all of our predecessors, merging what we know from them into
  // result.  If we encounter an unexplored predecessor, we eagerly explore it
  // in a depth first manner.  In practice, this has the effect of discovering
  // paths we can't analyze eagerly without spending compile times analyzing
  // other paths.  This heuristic benefits from the fact that predecessors are
  // frequently arranged such that dominating ones come first and we quickly
  // find a path to function entry.  TODO: We should consider explicitly
  // canonicalizing to make this true rather than relying on this happy
  // accident.
  for (BasicBlock *Pred : predecessors(BB)) {
    Optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
    if (!EdgeResult)
      // Explore that input, then return here
      return None;

    Result.mergeIn(*EdgeResult);

    // If we hit overdefined, exit early.  The BlockVals entry is already set
    // to overdefined.
    if (Result.isOverdefined()) {
      LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                        << "' - overdefined because of pred (non local).\n");
      return Result;
    }
  }

  // Return the merged value, which is more precise than 'overdefined'.
  assert(!Result.isOverdefined());
  return Result;
}

Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValuePHINode(
    PHINode *PN, BasicBlock *BB) {
  ValueLatticeElement Result;  // Start Undefined.

  // Loop over all of our predecessors, merging what we know from them into
  // result.  See the comment about the chosen traversal order in
  // solveBlockValueNonLocal; the same reasoning applies here.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *PhiBB = PN->getIncomingBlock(i);
    Value *PhiVal = PN->getIncomingValue(i);
    // Note that we can provide PN as the context value to getEdgeValue, even
    // though the results will be cached, because PN is the value being used as
    // the cache key in the caller.
    Optional<ValueLatticeElement> EdgeResult =
        getEdgeValue(PhiVal, PhiBB, BB, PN);
    if (!EdgeResult)
      // Explore that input, then return here
      return None;

    Result.mergeIn(*EdgeResult);

    // If we hit overdefined, exit early.  The BlockVals entry is already set
    // to overdefined.
    if (Result.isOverdefined()) {
      LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                        << "' - overdefined because of pred (local).\n");

      return Result;
    }
  }

  // Return the merged value, which is more precise than 'overdefined'.
  assert(!Result.isOverdefined() && "Possible PHI in entry block?");
  return Result;
}

static ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
                                                 bool isTrueDest = true);

// If we can determine a constraint on the value given conditions assumed by
// the program, intersect those constraints with BBLV
void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
    Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
  // With no explicit context, use Val's own defining instruction (if any) as
  // the context for assume validity checks.
  BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
  if (!BBI)
    return;

  BasicBlock *BB = BBI->getParent();
  for (auto &AssumeVH : AC->assumptionsFor(Val)) {
    if (!AssumeVH)
      continue;

    // Only check assumes in the block of the context instruction. Other
    // assumes will have already been taken into account when the value was
    // propagated from predecessor blocks.
    auto *I = cast<CallInst>(AssumeVH);
    if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))
      continue;

    BBLV = intersect(BBLV, getValueFromCondition(Val, I->getArgOperand(0)));
  }

  // If guards are not used in the module, don't spend time looking for them
  if (GuardDecl && !GuardDecl->use_empty() &&
      BBI->getIterator() != BB->begin()) {
    // Walk backwards from just above BBI: only guards that execute before the
    // context instruction constrain the value at BBI.
    for (Instruction &I : make_range(std::next(BBI->getIterator().getReverse()),
                                     BB->rend())) {
      Value *Cond = nullptr;
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
        BBLV = intersect(BBLV, getValueFromCondition(Val, Cond));
    }
  }

  if (BBLV.isOverdefined()) {
    // Check whether we're checking at the terminator, and the pointer has
    // been dereferenced in this block.
    PointerType *PTy = dyn_cast<PointerType>(Val->getType());
    if (PTy && BB->getTerminator() == BBI &&
        isNonNullAtEndOfBlock(Val, BB))
      BBLV = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
  }
}

/// Return Val's constant range if it has one, otherwise the full range for
/// its (integer-sized) type; used so transfer functions always have a range
/// to work with.
static ConstantRange getConstantRangeOrFull(const ValueLatticeElement &Val,
                                            Type *Ty, const DataLayout &DL) {
  if (Val.isConstantRange())
    return Val.getConstantRange();
  return ConstantRange::getFull(DL.getTypeSizeInBits(Ty));
}

Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueSelect(
    SelectInst *SI, BasicBlock *BB) {
  // Recurse on our inputs if needed
  Optional<ValueLatticeElement> OptTrueVal =
      getBlockValue(SI->getTrueValue(), BB, SI);
  if (!OptTrueVal)
    return None;
  ValueLatticeElement &TrueVal = *OptTrueVal;

  Optional<ValueLatticeElement> OptFalseVal =
      getBlockValue(SI->getFalseValue(), BB, SI);
  if (!OptFalseVal)
    return None;
  ValueLatticeElement &FalseVal = *OptFalseVal;

  if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
    const ConstantRange &TrueCR =
        getConstantRangeOrFull(TrueVal, SI->getType(), DL);
    const ConstantRange &FalseCR =
        getConstantRangeOrFull(FalseVal, SI->getType(), DL);
    Value *LHS = nullptr;
    Value *RHS = nullptr;
    SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
    // Is this a min specifically of our two inputs?  (Avoid the risk of
    // ValueTracking getting smarter looking back past our immediate inputs.)
830 if (SelectPatternResult::isMinOrMax(SPR.Flavor) && 831 ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) || 832 (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) { 833 ConstantRange ResultCR = [&]() { 834 switch (SPR.Flavor) { 835 default: 836 llvm_unreachable("unexpected minmax type!"); 837 case SPF_SMIN: /// Signed minimum 838 return TrueCR.smin(FalseCR); 839 case SPF_UMIN: /// Unsigned minimum 840 return TrueCR.umin(FalseCR); 841 case SPF_SMAX: /// Signed maximum 842 return TrueCR.smax(FalseCR); 843 case SPF_UMAX: /// Unsigned maximum 844 return TrueCR.umax(FalseCR); 845 }; 846 }(); 847 return ValueLatticeElement::getRange( 848 ResultCR, TrueVal.isConstantRangeIncludingUndef() || 849 FalseVal.isConstantRangeIncludingUndef()); 850 } 851 852 if (SPR.Flavor == SPF_ABS) { 853 if (LHS == SI->getTrueValue()) 854 return ValueLatticeElement::getRange( 855 TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef()); 856 if (LHS == SI->getFalseValue()) 857 return ValueLatticeElement::getRange( 858 FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef()); 859 } 860 861 if (SPR.Flavor == SPF_NABS) { 862 ConstantRange Zero(APInt::getZero(TrueCR.getBitWidth())); 863 if (LHS == SI->getTrueValue()) 864 return ValueLatticeElement::getRange( 865 Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef()); 866 if (LHS == SI->getFalseValue()) 867 return ValueLatticeElement::getRange( 868 Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef()); 869 } 870 } 871 872 // Can we constrain the facts about the true and false values by using the 873 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5). 874 // TODO: We could potentially refine an overdefined true value above. 
875 Value *Cond = SI->getCondition(); 876 TrueVal = intersect(TrueVal, 877 getValueFromCondition(SI->getTrueValue(), Cond, true)); 878 FalseVal = intersect(FalseVal, 879 getValueFromCondition(SI->getFalseValue(), Cond, false)); 880 881 ValueLatticeElement Result = TrueVal; 882 Result.mergeIn(FalseVal); 883 return Result; 884 } 885 886 Optional<ConstantRange> LazyValueInfoImpl::getRangeFor(Value *V, 887 Instruction *CxtI, 888 BasicBlock *BB) { 889 Optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI); 890 if (!OptVal) 891 return None; 892 return getConstantRangeOrFull(*OptVal, V->getType(), DL); 893 } 894 895 Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueCast( 896 CastInst *CI, BasicBlock *BB) { 897 // Without knowing how wide the input is, we can't analyze it in any useful 898 // way. 899 if (!CI->getOperand(0)->getType()->isSized()) 900 return ValueLatticeElement::getOverdefined(); 901 902 // Filter out casts we don't know how to reason about before attempting to 903 // recurse on our operand. This can cut a long search short if we know we're 904 // not going to be able to get any useful information anways. 905 switch (CI->getOpcode()) { 906 case Instruction::Trunc: 907 case Instruction::SExt: 908 case Instruction::ZExt: 909 case Instruction::BitCast: 910 break; 911 default: 912 // Unhandled instructions are overdefined. 913 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName() 914 << "' - overdefined (unknown cast).\n"); 915 return ValueLatticeElement::getOverdefined(); 916 } 917 918 // Figure out the range of the LHS. If that fails, we still apply the 919 // transfer rule on the full set since we may be able to locally infer 920 // interesting facts. 921 Optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB); 922 if (!LHSRes.hasValue()) 923 // More work to do before applying this transfer rule. 
924 return None; 925 const ConstantRange &LHSRange = LHSRes.getValue(); 926 927 const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth(); 928 929 // NOTE: We're currently limited by the set of operations that ConstantRange 930 // can evaluate symbolically. Enhancing that set will allows us to analyze 931 // more definitions. 932 return ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(), 933 ResultBitWidth)); 934 } 935 936 Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueBinaryOpImpl( 937 Instruction *I, BasicBlock *BB, 938 std::function<ConstantRange(const ConstantRange &, 939 const ConstantRange &)> OpFn) { 940 // Figure out the ranges of the operands. If that fails, use a 941 // conservative range, but apply the transfer rule anyways. This 942 // lets us pick up facts from expressions like "and i32 (call i32 943 // @foo()), 32" 944 Optional<ConstantRange> LHSRes = getRangeFor(I->getOperand(0), I, BB); 945 Optional<ConstantRange> RHSRes = getRangeFor(I->getOperand(1), I, BB); 946 if (!LHSRes.hasValue() || !RHSRes.hasValue()) 947 // More work to do before applying this transfer rule. 948 return None; 949 950 const ConstantRange &LHSRange = LHSRes.getValue(); 951 const ConstantRange &RHSRange = RHSRes.getValue(); 952 return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange)); 953 } 954 955 Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueBinaryOp( 956 BinaryOperator *BO, BasicBlock *BB) { 957 assert(BO->getOperand(0)->getType()->isSized() && 958 "all operands to binary operators are sized"); 959 if (BO->getOpcode() == Instruction::Xor) { 960 // Xor is the only operation not supported by ConstantRange::binaryOp(). 
961 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName() 962 << "' - overdefined (unknown binary operator).\n"); 963 return ValueLatticeElement::getOverdefined(); 964 } 965 966 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) { 967 unsigned NoWrapKind = 0; 968 if (OBO->hasNoUnsignedWrap()) 969 NoWrapKind |= OverflowingBinaryOperator::NoUnsignedWrap; 970 if (OBO->hasNoSignedWrap()) 971 NoWrapKind |= OverflowingBinaryOperator::NoSignedWrap; 972 973 return solveBlockValueBinaryOpImpl( 974 BO, BB, 975 [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) { 976 return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind); 977 }); 978 } 979 980 return solveBlockValueBinaryOpImpl( 981 BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) { 982 return CR1.binaryOp(BO->getOpcode(), CR2); 983 }); 984 } 985 986 Optional<ValueLatticeElement> 987 LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, 988 BasicBlock *BB) { 989 return solveBlockValueBinaryOpImpl( 990 WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) { 991 return CR1.binaryOp(WO->getBinaryOp(), CR2); 992 }); 993 } 994 995 Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueIntrinsic( 996 IntrinsicInst *II, BasicBlock *BB) { 997 if (!ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) { 998 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName() 999 << "' - unknown intrinsic.\n"); 1000 return getFromRangeMetadata(II); 1001 } 1002 1003 SmallVector<ConstantRange, 2> OpRanges; 1004 for (Value *Op : II->args()) { 1005 Optional<ConstantRange> Range = getRangeFor(Op, II, BB); 1006 if (!Range) 1007 return None; 1008 OpRanges.push_back(*Range); 1009 } 1010 1011 return ValueLatticeElement::getRange( 1012 ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges)); 1013 } 1014 1015 Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueExtractValue( 1016 ExtractValueInst *EVI, BasicBlock *BB) { 1017 if (auto *WO = 
dyn_cast<WithOverflowInst>(EVI->getAggregateOperand())) 1018 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0) 1019 return solveBlockValueOverflowIntrinsic(WO, BB); 1020 1021 // Handle extractvalue of insertvalue to allow further simplification 1022 // based on replaced with.overflow intrinsics. 1023 if (Value *V = SimplifyExtractValueInst( 1024 EVI->getAggregateOperand(), EVI->getIndices(), 1025 EVI->getModule()->getDataLayout())) 1026 return getBlockValue(V, BB, EVI); 1027 1028 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName() 1029 << "' - overdefined (unknown extractvalue).\n"); 1030 return ValueLatticeElement::getOverdefined(); 1031 } 1032 1033 static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val, 1034 ICmpInst::Predicate Pred) { 1035 if (LHS == Val) 1036 return true; 1037 1038 // Handle range checking idiom produced by InstCombine. We will subtract the 1039 // offset from the allowed range for RHS in this case. 1040 const APInt *C; 1041 if (match(LHS, m_Add(m_Specific(Val), m_APInt(C)))) { 1042 Offset = *C; 1043 return true; 1044 } 1045 1046 // Handle the symmetric case. This appears in saturation patterns like 1047 // (x == 16) ? 16 : (x + 1). 1048 if (match(Val, m_Add(m_Specific(LHS), m_APInt(C)))) { 1049 Offset = -*C; 1050 return true; 1051 } 1052 1053 // If (x | y) < C, then (x < C) && (y < C). 1054 if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) && 1055 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)) 1056 return true; 1057 1058 // If (x & y) > C, then (x > C) && (y > C). 1059 if (match(LHS, m_c_And(m_Specific(Val), m_Value())) && 1060 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)) 1061 return true; 1062 1063 return false; 1064 } 1065 1066 /// Get value range for a "(Val + Offset) Pred RHS" condition. 
static ValueLatticeElement getValueFromSimpleICmpCondition(
    CmpInst::Predicate Pred, Value *RHS, const APInt &Offset) {
  // Start from the full range for RHS; tighten it via a constant value or
  // !range metadata when available.
  ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
                         /*isFullSet=*/true);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
    RHSRange = ConstantRange(CI->getValue());
  else if (Instruction *I = dyn_cast<Instruction>(RHS))
    if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
      RHSRange = getConstantRangeFromMetadata(*Ranges);

  // The allowed region for (Val + Offset); shift it back by Offset to get
  // the region for Val itself.
  ConstantRange TrueValues =
      ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
  return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
}

/// Derive a lattice value for \p Val from an icmp condition known to be
/// true (\p isTrueDest) or false along an edge. Handles equality against a
/// constant, offset/mask/urem/trunc idioms via the helpers above.
static ValueLatticeElement getValueFromICmpCondition(Value *Val, ICmpInst *ICI,
                                                     bool isTrueDest) {
  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  // Get the predicate that must hold along the considered edge.
  CmpInst::Predicate EdgePred =
      isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();

  if (isa<Constant>(RHS)) {
    if (ICI->isEquality() && LHS == Val) {
      if (EdgePred == ICmpInst::ICMP_EQ)
        return ValueLatticeElement::get(cast<Constant>(RHS));
      else if (!isa<UndefValue>(RHS))
        // "Val != undef" carries no information, so only record the
        // not-constant fact for a well-defined RHS.
        return ValueLatticeElement::getNot(cast<Constant>(RHS));
    }
  }

  Type *Ty = Val->getType();
  if (!Ty->isIntegerTy())
    return ValueLatticeElement::getOverdefined();

  unsigned BitWidth = Ty->getScalarSizeInBits();
  APInt Offset(BitWidth, 0);
  if (matchICmpOperand(Offset, LHS, Val, EdgePred))
    return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset);

  // Try the symmetric form with Val on the RHS of the compare.
  CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(EdgePred);
  if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
    return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset);

  const APInt *Mask, *C;
  if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
      match(RHS, m_APInt(C))) {
    // If (Val & Mask) == C then all the masked bits are known and we can
    // compute a value range based on that.
    if (EdgePred == ICmpInst::ICMP_EQ) {
      KnownBits Known;
      Known.Zero = ~*C & *Mask;
      Known.One = *C & *Mask;
      return ValueLatticeElement::getRange(
          ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
    }
    // If (Val & Mask) != 0 then the value must be larger than the lowest set
    // bit of Mask.
    if (EdgePred == ICmpInst::ICMP_NE && !Mask->isZero() && C->isZero()) {
      return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
          APInt::getOneBitSet(BitWidth, Mask->countTrailingZeros()),
          APInt::getZero(BitWidth)));
    }
  }

  // If (X urem Modulus) >= C, then X >= C.
  // If trunc X >= C, then X >= C.
  // TODO: An upper bound could be computed as well.
  if (match(LHS, m_CombineOr(m_URem(m_Specific(Val), m_Value()),
                             m_Trunc(m_Specific(Val)))) &&
      match(RHS, m_APInt(C))) {
    // Use the icmp region so we don't have to deal with different predicates.
    ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
    if (!CR.isEmptySet())
      return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
          CR.getUnsignedMin().zextOrSelf(BitWidth), APInt(BitWidth, 0)));
  }

  return ValueLatticeElement::getOverdefined();
}

// Handle conditions of the form
// extractvalue(op.with.overflow(%x, C), 1).
static ValueLatticeElement getValueFromOverflowCondition(
    Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
  // TODO: This only works with a constant RHS for now. We could also compute
  // the range of the RHS, but this doesn't fit into the current structure of
  // the edge value calculation.
  const APInt *C;
  if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
    return ValueLatticeElement::getOverdefined();

  // Calculate the possible values of %x for which no overflow occurs.
  ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
      WO->getBinaryOp(), *C, WO->getNoWrapKind());

  // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
  // constrained to it's inverse (all values that might cause overflow).
  if (IsTrueDest)
    NWR = NWR.inverse();
  return ValueLatticeElement::getRange(NWR);
}

/// One step of the iterative getValueFromCondition() evaluation. On the
/// first visit of a leaf condition (icmp / overflow-bit extract) the result
/// is computed directly. For logical and/or, returns None and pushes the
/// unevaluated operands onto \p Worklist so the driver can revisit later.
static Optional<ValueLatticeElement>
getValueFromConditionImpl(Value *Val, Value *Cond, bool isTrueDest,
                          bool isRevisit,
                          SmallDenseMap<Value *, ValueLatticeElement> &Visited,
                          SmallVectorImpl<Value *> &Worklist) {
  if (!isRevisit) {
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
      return getValueFromICmpCondition(Val, ICI, isTrueDest);

    if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
      if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
        if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
          return getValueFromOverflowCondition(Val, WO, isTrueDest);
  }

  Value *L, *R;
  bool IsAnd;
  if (match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))))
    IsAnd = true;
  else if (match(Cond, m_LogicalOr(m_Value(L), m_Value(R))))
    IsAnd = false;
  else
    return ValueLatticeElement::getOverdefined();

  auto LV = Visited.find(L);
  auto RV = Visited.find(R);

  // if (L && R) -> intersect L and R
  // if (!(L || R)) -> intersect L and R
  // if (L || R) -> union L and R
  // if (!(L && R)) -> union L and R
  if ((isTrueDest ^ IsAnd) && (LV != Visited.end())) {
    ValueLatticeElement V = LV->second;
    if (V.isOverdefined())
      return V;
    if (RV != Visited.end()) {
      V.mergeIn(RV->second);
      return V;
    }
  }

  if (LV == Visited.end() || RV == Visited.end()) {
    assert(!isRevisit);
    if (LV == Visited.end())
      Worklist.push_back(L);
    if (RV == Visited.end())
      Worklist.push_back(R);
    return None;
  }

  return intersect(LV->second, RV->second);
}

/// Compute what \p Cond being true (\p isTrueDest) or false implies about
/// \p Val, walking through logical and/or structure iteratively with an
/// explicit worklist (avoids unbounded recursion on deep conditions).
ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
                                          bool isTrueDest) {
  assert(Cond && "precondition");
  SmallDenseMap<Value*, ValueLatticeElement> Visited;
  SmallVector<Value *> Worklist;

  Worklist.push_back(Cond);
  do {
    Value *CurrentCond = Worklist.back();
    // Insert an Overdefined placeholder into the set to prevent
    // infinite recursion if there exists IRs that use not
    // dominated by its def as in this example:
    //   "%tmp3 = or i1 undef, %tmp4"
    //   "%tmp4 = or i1 undef, %tmp3"
    auto Iter =
        Visited.try_emplace(CurrentCond, ValueLatticeElement::getOverdefined());
    bool isRevisit = !Iter.second;
    Optional<ValueLatticeElement> Result = getValueFromConditionImpl(
        Val, CurrentCond, isTrueDest, isRevisit, Visited, Worklist);
    if (Result) {
      // Finished (possibly after revisiting); record the real value and
      // retire this worklist entry.
      Visited[CurrentCond] = *Result;
      Worklist.pop_back();
    }
  } while (!Worklist.empty());

  auto Result = Visited.find(Cond);
  assert(Result != Visited.end());
  return Result->second;
}

// Return true if Usr has Op as an operand, otherwise false.
static bool usesOperand(User *Usr, Value *Op) {
  return is_contained(Usr->operands(), Op);
}

// Return true if the instruction type of Val is supported by
// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
// Call this before calling constantFoldUser() to find out if it's even worth
// attempting to call it.
static bool isOperationFoldable(User *Usr) {
  return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
}

// Check if Usr can be simplified to an integer constant when the value of one
// of its operands Op is an integer constant OpConstVal. If so, return it as an
// lattice value range with a single element or otherwise return an overdefined
// lattice value.
static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
                                            const APInt &OpConstVal,
                                            const DataLayout &DL) {
  assert(isOperationFoldable(Usr) && "Precondition");
  Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
  // Check if Usr can be simplified to a constant.
  if (auto *CI = dyn_cast<CastInst>(Usr)) {
    assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
    if (auto *C = dyn_cast_or_null<ConstantInt>(
            SimplifyCastInst(CI->getOpcode(), OpConst,
                             CI->getDestTy(), DL))) {
      return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
    }
  } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
    // Substitute OpConst for whichever operand(s) equal Op.
    bool Op0Match = BO->getOperand(0) == Op;
    bool Op1Match = BO->getOperand(1) == Op;
    assert((Op0Match || Op1Match) &&
           "Operand 0 nor Operand 1 isn't a match");
    Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
    Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
    if (auto *C = dyn_cast_or_null<ConstantInt>(
            SimplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
      return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
    }
  } else if (isa<FreezeInst>(Usr)) {
    // freeze of a known constant is that constant.
    assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
    return ValueLatticeElement::getRange(ConstantRange(OpConstVal));
  }
  return ValueLatticeElement::getOverdefined();
}

/// Compute the value of Val on the edge BBFrom -> BBTo. Returns false if
/// Val is not constrained on the edge. Result is unspecified if return value
/// is false.
static Optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
                                                       BasicBlock *BBFrom,
                                                       BasicBlock *BBTo) {
  // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
  // know that v != 0.
  if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
    // If this is a conditional branch and only one successor goes to BBTo, then
    // we may be able to infer something from the condition.
    if (BI->isConditional() &&
        BI->getSuccessor(0) != BI->getSuccessor(1)) {
      bool isTrueDest = BI->getSuccessor(0) == BBTo;
      assert(BI->getSuccessor(!isTrueDest) == BBTo &&
             "BBTo isn't a successor of BBFrom");
      Value *Condition = BI->getCondition();

      // If V is the condition of the branch itself, then we know exactly what
      // it is.
      if (Condition == Val)
        return ValueLatticeElement::get(ConstantInt::get(
            Type::getInt1Ty(Val->getContext()), isTrueDest));

      // If the condition of the branch is an equality comparison, we may be
      // able to infer the value.
      ValueLatticeElement Result = getValueFromCondition(Val, Condition,
                                                         isTrueDest);
      if (!Result.isOverdefined())
        return Result;

      if (User *Usr = dyn_cast<User>(Val)) {
        assert(Result.isOverdefined() && "Result isn't overdefined");
        // Check with isOperationFoldable() first to avoid linearly iterating
        // over the operands unnecessarily which can be expensive for
        // instructions with many operands.
        if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
          const DataLayout &DL = BBTo->getModule()->getDataLayout();
          if (usesOperand(Usr, Condition)) {
            // If Val has Condition as an operand and Val can be folded into a
            // constant with either Condition == true or Condition == false,
            // propagate the constant.
            // eg.
            //   ; %Val is true on the edge to %then.
            //   %Val = and i1 %Condition, true.
            //   br %Condition, label %then, label %else
            APInt ConditionVal(1, isTrueDest ? 1 : 0);
            Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
          } else {
            // If one of Val's operand has an inferred value, we may be able to
            // infer the value of Val.
            // eg.
            //    ; %Val is 94 on the edge to %then.
            //    %Val = add i8 %Op, 1
            //    %Condition = icmp eq i8 %Op, 93
            //    br i1 %Condition, label %then, label %else
            for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
              Value *Op = Usr->getOperand(i);
              ValueLatticeElement OpLatticeVal =
                  getValueFromCondition(Op, Condition, isTrueDest);
              if (Optional<APInt> OpConst = OpLatticeVal.asConstantInteger()) {
                Result = constantFoldUser(Usr, Op, OpConst.getValue(), DL);
                break;
              }
            }
          }
        }
      }
      if (!Result.isOverdefined())
        return Result;
    }
  }

  // If the edge was formed by a switch on the value, then we may know exactly
  // what it is.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
    Value *Condition = SI->getCondition();
    if (!isa<IntegerType>(Val->getType()))
      return None;
    bool ValUsesConditionAndMayBeFoldable = false;
    if (Condition != Val) {
      // Check if Val has Condition as an operand.
      if (User *Usr = dyn_cast<User>(Val))
        ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
            usesOperand(Usr, Condition);
      if (!ValUsesConditionAndMayBeFoldable)
        return None;
    }
    assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
           "Condition != Val nor Val doesn't use Condition");

    // On the default edge we start from the full set and carve out case
    // values; on a case edge we start empty and union in matching cases.
    bool DefaultCase = SI->getDefaultDest() == BBTo;
    unsigned BitWidth = Val->getType()->getIntegerBitWidth();
    ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);

    for (auto Case : SI->cases()) {
      APInt CaseValue = Case.getCaseValue()->getValue();
      ConstantRange EdgeVal(CaseValue);
      if (ValUsesConditionAndMayBeFoldable) {
        User *Usr = cast<User>(Val);
        const DataLayout &DL = BBTo->getModule()->getDataLayout();
        ValueLatticeElement EdgeLatticeVal =
            constantFoldUser(Usr, Condition, CaseValue, DL);
        if (EdgeLatticeVal.isOverdefined())
          return None;
        EdgeVal = EdgeLatticeVal.getConstantRange();
      }
      if (DefaultCase) {
        // It is possible that the default destination is the destination of
        // some cases. We cannot perform difference for those cases.
        // We know Condition != CaseValue in BBTo. In some cases we can use
        // this to infer Val == f(Condition) is != f(CaseValue). For now, we
        // only do this when f is identity (i.e. Val == Condition), but we
        // should be able to do this for any injective f.
        if (Case.getCaseSuccessor() != BBTo && Condition == Val)
          EdgesVals = EdgesVals.difference(EdgeVal);
      } else if (Case.getCaseSuccessor() == BBTo)
        EdgesVals = EdgesVals.unionWith(EdgeVal);
    }
    return ValueLatticeElement::getRange(std::move(EdgesVals));
  }
  return None;
}

/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
/// the basic block if the edge does not constrain Val.
Optional<ValueLatticeElement> LazyValueInfoImpl::getEdgeValue(
    Value *Val, BasicBlock *BBFrom, BasicBlock *BBTo, Instruction *CxtI) {
  // If already a constant, there is nothing to compute.
  if (Constant *VC = dyn_cast<Constant>(Val))
    return ValueLatticeElement::get(VC);

  ValueLatticeElement LocalResult =
      getEdgeValueLocal(Val, BBFrom, BBTo)
          .getValueOr(ValueLatticeElement::getOverdefined());
  if (hasSingleValue(LocalResult))
    // Can't get any more precise here
    return LocalResult;

  Optional<ValueLatticeElement> OptInBlock =
      getBlockValue(Val, BBFrom, BBFrom->getTerminator());
  if (!OptInBlock)
    return None;
  ValueLatticeElement &InBlock = *OptInBlock;

  // We can use the context instruction (generically the ultimate instruction
  // the calling pass is trying to simplify) here, even though the result of
  // this function is generally cached when called from the solve* functions
  // (and that cached result might be used with queries using a different
  // context instruction), because when this function is called from the solve*
  // functions, the context instruction is not provided. When called from
  // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
  // but then the result is not cached.
  intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);

  // Combine the edge-local fact with the value at the end of BBFrom.
  return intersect(LocalResult, InBlock);
}

/// Public entry point: the lattice value of \p V at the end of \p BB.
/// Runs the worklist solver if the value is not yet available.
ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
                                                       Instruction *CxtI) {
  LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
                    << BB->getName() << "'\n");

  assert(BlockValueStack.empty() && BlockValueSet.empty());
  Optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
  if (!OptResult) {
    solve();
    OptResult = getBlockValue(V, BB, CxtI);
    assert(OptResult && "Value not available after solving");
  }

  ValueLatticeElement Result = *OptResult;
  LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
  return Result;
}

/// Public entry point: the lattice value of \p V at \p CxtI, computed from
/// constants, !range metadata, and assumes/guards only — no block solving.
ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
  LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
                    << "'\n");

  if (auto *C = dyn_cast<Constant>(V))
    return ValueLatticeElement::get(C);

  ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
  if (auto *I = dyn_cast<Instruction>(V))
    Result = getFromRangeMetadata(I);
  intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);

  LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
  return Result;
}

/// Public entry point: the lattice value of \p V on the CFG edge
/// FromBB -> ToBB. Runs the solver if more work is needed.
ValueLatticeElement LazyValueInfoImpl::
getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
               Instruction *CxtI) {
  LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
                    << FromBB->getName() << "' to '" << ToBB->getName()
                    << "'\n");

  Optional<ValueLatticeElement> Result = getEdgeValue(V, FromBB, ToBB, CxtI);
  if (!Result) {
    solve();
    Result = getEdgeValue(V, FromBB, ToBB, CxtI);
    assert(Result && "More work to do after problem solved?");
  }

  LLVM_DEBUG(dbgs() << " Result = " << *Result << "\n");
  return *Result;
}

/// Forward edge-threading cache maintenance to the cache implementation.
void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                                   BasicBlock *NewSucc) {
  TheCache.threadEdgeImpl(OldSucc, NewSucc);
}

//===----------------------------------------------------------------------===//
// LazyValueInfo Impl
//===----------------------------------------------------------------------===//

/// This lazily constructs the LazyValueInfoImpl.
static LazyValueInfoImpl &getImpl(void *&PImpl, AssumptionCache *AC,
                                  const Module *M) {
  if (!PImpl) {
    assert(M && "getCache() called with a null Module");
    const DataLayout &DL = M->getDataLayout();
    Function *GuardDecl = M->getFunction(
        Intrinsic::getName(Intrinsic::experimental_guard));
    PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
  }
  return *static_cast<LazyValueInfoImpl*>(PImpl);
}

bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
  Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  Info.TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);

  // Drop any stale cached results from a previous function.
  if (Info.PImpl)
    getImpl(Info.PImpl, Info.AC, F.getParent()).clear();

  // Fully lazy.
  return false;
}

void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }

LazyValueInfo::~LazyValueInfo() { releaseMemory(); }

void LazyValueInfo::releaseMemory() {
  // If the cache was allocated, free it.
  if (PImpl) {
    delete &getImpl(PImpl, AC, nullptr);
    PImpl = nullptr;
  }
}

bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We need to invalidate if we have either failed to preserve this analyses
  // result directly or if any of its dependencies have been invalidated.
  auto PAC = PA.getChecker<LazyValueAnalysis>();
  if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
    return true;

  return false;
}

void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }

LazyValueInfo LazyValueAnalysis::run(Function &F,
                                     FunctionAnalysisManager &FAM) {
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
  auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);

  return LazyValueInfo(&AC, &F.getParent()->getDataLayout(), &TLI);
}

/// Returns true if we can statically tell that this value will never be a
/// "useful" constant. In practice, this means we've got something like an
/// alloca or a malloc call for which a comparison against a constant can
/// only be guarding dead code. Note that we are potentially giving up some
/// precision in dead code (a constant result) in favour of avoiding a
/// expensive search for a easily answered common query.
static bool isKnownNonConstant(Value *V) {
  V = V->stripPointerCasts();
  // The return val of alloc cannot be a Constant.
  if (isa<AllocaInst>(V))
    return true;
  return false;
}

Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
  // Bail out early if V is known not to be a Constant.
  if (isKnownNonConstant(V))
    return nullptr;

  BasicBlock *BB = CxtI->getParent();
  ValueLatticeElement Result =
      getImpl(PImpl, AC, BB->getModule()).getValueInBlock(V, BB, CxtI);

  if (Result.isConstant())
    return Result.getConstant();
  if (Result.isConstantRange()) {
    // A single-element range is as good as a constant.
    const ConstantRange &CR = Result.getConstantRange();
    if (const APInt *SingleVal = CR.getSingleElement())
      return ConstantInt::get(V->getContext(), *SingleVal);
  }
  return nullptr;
}

ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
                                              bool UndefAllowed) {
  assert(V->getType()->isIntegerTy());
  unsigned Width = V->getType()->getIntegerBitWidth();
  BasicBlock *BB = CxtI->getParent();
  ValueLatticeElement Result =
      getImpl(PImpl, AC, BB->getModule()).getValueInBlock(V, BB, CxtI);
  if (Result.isUnknown())
    return ConstantRange::getEmpty(Width);
  if (Result.isConstantRange(UndefAllowed))
    return Result.getConstantRange(UndefAllowed);
  // We represent ConstantInt constants as constant ranges but other kinds
  // of integer constants, i.e. ConstantExpr will be tagged as constants
  assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
         "ConstantInt value must be represented as constantrange");
  return ConstantRange::getFull(Width);
}

/// Determine whether the specified value is known to be a
/// constant on the specified edge. Return null if not.
Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
                                           BasicBlock *ToBB,
                                           Instruction *CxtI) {
  Module *M = FromBB->getModule();
  ValueLatticeElement Result =
      getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);

  if (Result.isConstant())
    return Result.getConstant();
  if (Result.isConstantRange()) {
    // A single-element range is as good as a constant.
    const ConstantRange &CR = Result.getConstantRange();
    if (const APInt *SingleVal = CR.getSingleElement())
      return ConstantInt::get(V->getContext(), *SingleVal);
  }
  return nullptr;
}

ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
                                                    BasicBlock *FromBB,
                                                    BasicBlock *ToBB,
                                                    Instruction *CxtI) {
  unsigned Width = V->getType()->getIntegerBitWidth();
  Module *M = FromBB->getModule();
  ValueLatticeElement Result =
      getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);

  if (Result.isUnknown())
    return ConstantRange::getEmpty(Width);
  if (Result.isConstantRange())
    return Result.getConstantRange();
  // We represent ConstantInt constants as constant ranges but other kinds
  // of integer constants, i.e. ConstantExpr will be tagged as constants
  assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
         "ConstantInt value must be represented as constantrange");
  return ConstantRange::getFull(Width);
}

/// Evaluate "V Pred C" given V's lattice value \p Val. Returns True/False
/// when the lattice value decides the predicate, Unknown otherwise.
static LazyValueInfo::Tristate
getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
                   const DataLayout &DL, TargetLibraryInfo *TLI) {
  // If we know the value is a constant, evaluate the conditional.
  Constant *Res = nullptr;
  if (Val.isConstant()) {
    // NOTE(review): dyn_cast here assumes ConstantFoldCompareInstOperands
    // returns non-null on this path — confirm; dyn_cast_or_null would be
    // the defensive choice.
    Res = ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL, TLI);
    if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
      return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
    return LazyValueInfo::Unknown;
  }

  if (Val.isConstantRange()) {
    ConstantInt *CI = dyn_cast<ConstantInt>(C);
    if (!CI) return LazyValueInfo::Unknown;

    const ConstantRange &CR = Val.getConstantRange();
    if (Pred == ICmpInst::ICMP_EQ) {
      if (!CR.contains(CI->getValue()))
        return LazyValueInfo::False;

      if (CR.isSingleElement())
        return LazyValueInfo::True;
    } else if (Pred == ICmpInst::ICMP_NE) {
      if (!CR.contains(CI->getValue()))
        return LazyValueInfo::True;

      if (CR.isSingleElement())
        return LazyValueInfo::False;
    } else {
      // Handle more complex predicates.
      ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
          (ICmpInst::Predicate)Pred, CI->getValue());
      if (TrueValues.contains(CR))
        return LazyValueInfo::True;
      if (TrueValues.inverse().contains(CR))
        return LazyValueInfo::False;
    }
    return LazyValueInfo::Unknown;
  }

  if (Val.isNotConstant()) {
    // If this is an equality comparison, we can try to fold it knowing that
    // "V != C1".
    if (Pred == ICmpInst::ICMP_EQ) {
      // !C1 == C -> false iff C1 == C.
      Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
                                            Val.getNotConstant(), C, DL,
                                            TLI);
      if (Res->isNullValue())
        return LazyValueInfo::False;
    } else if (Pred == ICmpInst::ICMP_NE) {
      // !C1 != C -> true iff C1 == C.
      Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
                                            Val.getNotConstant(), C, DL,
                                            TLI);
      if (Res->isNullValue())
        return LazyValueInfo::True;
    }
    return LazyValueInfo::Unknown;
  }

  return LazyValueInfo::Unknown;
}

/// Determine whether the specified value comparison with a constant is known to
/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
LazyValueInfo::Tristate
LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
                                  BasicBlock *FromBB, BasicBlock *ToBB,
                                  Instruction *CxtI) {
  Module *M = FromBB->getModule();
  // Solve the edge-restricted lattice value and decide the predicate from it.
  ValueLatticeElement Result =
      getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);

  return getPredicateResult(Pred, C, Result, M->getDataLayout(), TLI);
}

/// Determine whether "V Pred C" is known true or false at the context
/// instruction \p CxtI.  Falls back to per-incoming-edge reasoning when the
/// merged lattice value alone cannot decide the predicate.
LazyValueInfo::Tristate
LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
                              Instruction *CxtI, bool UseBlockValue) {
  // Is or is not NonNull are common predicates being queried. If
  // isKnownNonZero can tell us the result of the predicate, we can
  // return it quickly. But this is only a fastpath, and falling
  // through would still be correct.
  Module *M = CxtI->getModule();
  const DataLayout &DL = M->getDataLayout();
  if (V->getType()->isPointerTy() && C->isNullValue() &&
      isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
    if (Pred == ICmpInst::ICMP_EQ)
      return LazyValueInfo::False;
    else if (Pred == ICmpInst::ICMP_NE)
      return LazyValueInfo::True;
  }

  // Compute the lattice value for V at CxtI (optionally merged across the
  // whole block) and try to decide the predicate from it directly.
  ValueLatticeElement Result = UseBlockValue
      ? getImpl(PImpl, AC, M).getValueInBlock(V, CxtI->getParent(), CxtI)
      : getImpl(PImpl, AC, M).getValueAt(V, CxtI);
  Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
  if (Ret != Unknown)
    return Ret;

  // Note: The following bit of code is somewhat distinct from the rest of LVI;
  // LVI as a whole tries to compute a lattice value which is conservatively
  // correct at a given location.  In this case, we have a predicate which we
  // weren't able to prove about the merged result, and we're pushing that
  // predicate back along each incoming edge to see if we can prove it
  // separately for each input.  As a motivating example, consider:
  // bb1:
  //   %v1 = ... ; constantrange<1, 5>
  //   br label %merge
  // bb2:
  //   %v2 = ... ; constantrange<10, 20>
  //   br label %merge
  // merge:
  //   %phi = phi [%v1, %v2] ; constantrange<1,20>
  //   %pred = icmp eq i32 %phi, 8
  // We can't tell from the lattice value for '%phi' that '%pred' is false
  // along each path, but by checking the predicate over each input separately,
  // we can.
  // We limit the search to one step backwards from the current BB and value.
  // We could consider extending this to search further backwards through the
  // CFG and/or value graph, but there are non-obvious compile time vs quality
  // tradeoffs.
  BasicBlock *BB = CxtI->getParent();

  // Function entry or an unreachable block.  Bail to avoid confusing
  // analysis below.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE)
    return Unknown;

  // If V is a PHI node in the same block as the context, we need to ask
  // questions about the predicate as applied to the incoming value along
  // each edge. This is useful for eliminating cases where the predicate is
  // known along all incoming edges.
  if (auto *PHI = dyn_cast<PHINode>(V))
    if (PHI->getParent() == BB) {
      Tristate Baseline = Unknown;
      for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
        Value *Incoming = PHI->getIncomingValue(i);
        BasicBlock *PredBB = PHI->getIncomingBlock(i);
        // Note that PredBB may be BB itself.
        Tristate Result =
            getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, CxtI);

        // Keep going as long as we've seen a consistent known result for
        // all inputs.
        Baseline = (i == 0) ? Result /* First iteration */
                            : (Baseline == Result ? Baseline
                                                  : Unknown); /* All others */
        if (Baseline == Unknown)
          break;
      }
      if (Baseline != Unknown)
        return Baseline;
    }

  // For a comparison where the V is outside this block, it's possible
  // that we've branched on it before. Look to see if the value is known
  // on all incoming edges.
  if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
    // For predecessor edge, determine if the comparison is true or false
    // on that edge. If they're all true or all false, we can conclude
    // the value of the comparison in this block.
    Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
    if (Baseline != Unknown) {
      // Check that all remaining incoming values match the first one.
      while (++PI != PE) {
        Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
        if (Ret != Baseline)
          break;
      }
      // If we terminated early, then one of the values didn't match.
      if (PI == PE) {
        return Baseline;
      }
    }
  }

  return Unknown;
}

/// Two-value overload: canonicalize so the constant (if any) ends up on the
/// RHS, then defer to the constant-RHS overload above.
LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(unsigned P, Value *LHS,
                                                      Value *RHS,
                                                      Instruction *CxtI,
                                                      bool UseBlockValue) {
  CmpInst::Predicate Pred = (CmpInst::Predicate)P;

  if (auto *C = dyn_cast<Constant>(RHS))
    return getPredicateAt(P, LHS, C, CxtI, UseBlockValue);
  if (auto *C = dyn_cast<Constant>(LHS))
    return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
                          UseBlockValue);

  // Got two non-Constant values. While we could handle them somewhat,
  // by getting their constant ranges, and applying ConstantRange::icmp(),
  // so far it did not appear to be profitable.
  return LazyValueInfo::Unknown;
}

/// Inform the analysis that the edge PredBB->OldSucc has been redirected to
/// NewSucc, so cached values that depended on the old edge can be updated.
void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                               BasicBlock *NewSucc) {
  if (PImpl) {
    getImpl(PImpl, AC, PredBB->getModule())
        .threadEdge(PredBB, OldSucc, NewSucc);
  }
}

/// Drop all cached information associated with block \p BB.
void LazyValueInfo::eraseBlock(BasicBlock *BB) {
  if (PImpl) {
    getImpl(PImpl, AC, BB->getModule()).eraseBlock(BB);
  }
}

/// Print the lattice values computed for \p F, annotated per block, to \p OS.
void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
  if (PImpl) {
    getImpl(PImpl, AC, F.getParent()).printLVI(F, DTree, OS);
  }
}

// Print the LVI for the function arguments at the start of each basic block.
void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
    const BasicBlock *BB, formatted_raw_ostream &OS) {
  // Find if there are latticevalues defined for arguments of the function.
  auto *F = BB->getParent();
  for (auto &Arg : F->args()) {
    ValueLatticeElement Result = LVIImpl->getValueInBlock(
        const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
    // Unknown values are uninteresting; skip to keep the dump readable.
    if (Result.isUnknown())
      continue;
    OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
  }
}

// This function prints the LVI analysis for the instruction I at the beginning
// of various basic blocks. It relies on calculated values that are stored in
// the LazyValueInfoCache, and in the absence of cached values, recalculate the
// LazyValueInfo for `I`, and print that info.
void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
    const Instruction *I, formatted_raw_ostream &OS) {

  auto *ParentBB = I->getParent();
  SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
  // We can generate (solve) LVI values only for blocks that are dominated by
  // the I's parent.  However, to avoid generating LVI for all dominating
  // blocks, that contain redundant/uninteresting information, we print LVI
  // for blocks that may use this LVI information (such as immediate successor
  // blocks, and blocks that contain uses of `I`).
  auto printResult = [&](const BasicBlock *BB) {
    // Print each block at most once.
    if (!BlocksContainingLVI.insert(BB).second)
      return;
    ValueLatticeElement Result = LVIImpl->getValueInBlock(
        const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
    OS << "; LatticeVal for: '" << *I << "' in BB: '";
    BB->printAsOperand(OS, false);
    OS << "' is: " << Result << "\n";
  };

  printResult(ParentBB);
  // Print the LVI analysis results for the immediate successor blocks, that
  // are dominated by `ParentBB`.
  for (auto *BBSucc : successors(ParentBB))
    if (DT.dominates(ParentBB, BBSucc))
      printResult(BBSucc);

  // Print LVI in blocks where `I` is used.
  for (auto *U : I->users())
    if (auto *UseI = dyn_cast<Instruction>(U))
      if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
        printResult(UseI->getParent());

}

namespace {
// Printer class for LazyValueInfo results.
class LazyValueInfoPrinter : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid
  LazyValueInfoPrinter() : FunctionPass(ID) {
    initializeLazyValueInfoPrinterPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<LazyValueInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
  }

  // Get the mandatory dominator tree analysis and pass this in to the
  // LVIPrinter. We cannot rely on the LVI's DT, since it's optional.
  bool runOnFunction(Function &F) override {
    dbgs() << "LVI for function '" << F.getName() << "':\n";
    auto &LVI = getAnalysis<LazyValueInfoWrapperPass>().getLVI();
    auto &DTree = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LVI.printLVI(F, DTree, dbgs());
    return false;
  }
};
}

char LazyValueInfoPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(LazyValueInfoPrinter, "print-lazy-value-info",
                "Lazy Value Info Printer Pass", false, false)
INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
INITIALIZE_PASS_END(LazyValueInfoPrinter, "print-lazy-value-info",
                "Lazy Value Info Printer Pass", false, false)