1 //===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file defines the interface for lazy computation of value constraint 10 // information. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Analysis/LazyValueInfo.h" 15 #include "llvm/ADT/DenseSet.h" 16 #include "llvm/ADT/Optional.h" 17 #include "llvm/ADT/STLExtras.h" 18 #include "llvm/Analysis/AssumptionCache.h" 19 #include "llvm/Analysis/ConstantFolding.h" 20 #include "llvm/Analysis/InstructionSimplify.h" 21 #include "llvm/Analysis/TargetLibraryInfo.h" 22 #include "llvm/Analysis/ValueLattice.h" 23 #include "llvm/Analysis/ValueTracking.h" 24 #include "llvm/IR/AssemblyAnnotationWriter.h" 25 #include "llvm/IR/CFG.h" 26 #include "llvm/IR/ConstantRange.h" 27 #include "llvm/IR/Constants.h" 28 #include "llvm/IR/DataLayout.h" 29 #include "llvm/IR/Dominators.h" 30 #include "llvm/IR/Instructions.h" 31 #include "llvm/IR/IntrinsicInst.h" 32 #include "llvm/IR/Intrinsics.h" 33 #include "llvm/IR/LLVMContext.h" 34 #include "llvm/IR/PatternMatch.h" 35 #include "llvm/IR/ValueHandle.h" 36 #include "llvm/InitializePasses.h" 37 #include "llvm/Support/Debug.h" 38 #include "llvm/Support/FormattedStream.h" 39 #include "llvm/Support/raw_ostream.h" 40 #include <map> 41 using namespace llvm; 42 using namespace PatternMatch; 43 44 #define DEBUG_TYPE "lazy-value-info" 45 46 // This is the number of worklist items we will process to try to discover an 47 // answer for a given value. 48 static const unsigned MaxProcessedPerValue = 500; 49 50 char LazyValueInfoWrapperPass::ID = 0; 51 LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) { 52 initializeLazyValueInfoWrapperPassPass(*PassRegistry::getPassRegistry()); 53 } 54 INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info", 55 "Lazy Value Information Analysis", false, true) 56 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 57 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 58 INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info", 59 "Lazy Value Information Analysis", false, true) 60 61 namespace llvm { 62 FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); } 63 } 64 65 AnalysisKey LazyValueAnalysis::Key; 66 67 /// Returns true if this lattice value represents at most one possible value. 68 /// This is as precise as any lattice value can get while still representing 69 /// reachable code. 70 static bool hasSingleValue(const ValueLatticeElement &Val) { 71 if (Val.isConstantRange() && 72 Val.getConstantRange().isSingleElement()) 73 // Integer constants are single element ranges 74 return true; 75 if (Val.isConstant()) 76 // Non integer constants 77 return true; 78 return false; 79 } 80 81 /// Combine two sets of facts about the same value into a single set of 82 /// facts. Note that this method is not suitable for merging facts along 83 /// different paths in a CFG; that's what the mergeIn function is for. This 84 /// is for merging facts gathered about the same value at the same location 85 /// through two independent means. 86 /// Notes: 87 /// * This method does not promise to return the most precise possible lattice 88 /// value implied by A and B. It is allowed to return any lattice element 89 /// which is at least as strong as *either* A or B (unless our facts 90 /// conflict, see below). 91 /// * Due to unreachable code, the intersection of two lattice values could be 92 /// contradictory. If this happens, we return some valid lattice value so as 93 /// not confuse the rest of LVI. Ideally, we'd always return Undefined, but 94 /// we do not make this guarantee. TODO: This would be a useful enhancement. 95 static ValueLatticeElement intersect(const ValueLatticeElement &A, 96 const ValueLatticeElement &B) { 97 // Undefined is the strongest state. It means the value is known to be along 98 // an unreachable path. 99 if (A.isUnknown()) 100 return A; 101 if (B.isUnknown()) 102 return B; 103 104 // If we gave up for one, but got a useable fact from the other, use it. 105 if (A.isOverdefined()) 106 return B; 107 if (B.isOverdefined()) 108 return A; 109 110 // Can't get any more precise than constants. 111 if (hasSingleValue(A)) 112 return A; 113 if (hasSingleValue(B)) 114 return B; 115 116 // Could be either constant range or not constant here. 117 if (!A.isConstantRange() || !B.isConstantRange()) { 118 // TODO: Arbitrary choice, could be improved 119 return A; 120 } 121 122 // Intersect two constant ranges 123 ConstantRange Range = 124 A.getConstantRange().intersectWith(B.getConstantRange()); 125 // Note: An empty range is implicitly converted to overdefined internally. 126 // TODO: We could instead use Undefined here since we've proven a conflict 127 // and thus know this path must be unreachable. 128 return ValueLatticeElement::getRange(std::move(Range)); 129 } 130 131 //===----------------------------------------------------------------------===// 132 // LazyValueInfoCache Decl 133 //===----------------------------------------------------------------------===// 134 135 namespace { 136 /// A callback value handle updates the cache when values are erased. 137 class LazyValueInfoCache; 138 struct LVIValueHandle final : public CallbackVH { 139 // Needs to access getValPtr(), which is protected. 140 friend struct DenseMapInfo<LVIValueHandle>; 141 142 LazyValueInfoCache *Parent; 143 144 LVIValueHandle(Value *V, LazyValueInfoCache *P) 145 : CallbackVH(V), Parent(P) { } 146 147 void deleted() override; 148 void allUsesReplacedWith(Value *V) override { 149 deleted(); 150 } 151 }; 152 } // end anonymous namespace 153 154 namespace { 155 /// This is the cache kept by LazyValueInfo which 156 /// maintains information about queries across the clients' queries. 157 class LazyValueInfoCache { 158 /// This is all of the cached block information for exactly one Value*. 159 /// The entries are sorted by the BasicBlock* of the 160 /// entries, allowing us to do a lookup with a binary search. 161 /// Over-defined lattice values are recorded in OverDefinedCache to reduce 162 /// memory overhead. 163 struct ValueCacheEntryTy { 164 ValueCacheEntryTy(Value *V, LazyValueInfoCache *P) : Handle(V, P) {} 165 LVIValueHandle Handle; 166 SmallDenseMap<PoisoningVH<BasicBlock>, ValueLatticeElement, 4> BlockVals; 167 }; 168 169 /// This tracks, on a per-block basis, the set of values that are 170 /// over-defined at the end of that block. 171 typedef DenseMap<PoisoningVH<BasicBlock>, SmallPtrSet<Value *, 4>> 172 OverDefinedCacheTy; 173 /// Keep track of all blocks that we have ever seen, so we 174 /// don't spend time removing unused blocks from our caches. 175 DenseSet<PoisoningVH<BasicBlock> > SeenBlocks; 176 177 /// This is all of the cached information for all values, 178 /// mapped from Value* to key information. 179 DenseMap<Value *, std::unique_ptr<ValueCacheEntryTy>> ValueCache; 180 OverDefinedCacheTy OverDefinedCache; 181 182 183 public: 184 void insertResult(Value *Val, BasicBlock *BB, 185 const ValueLatticeElement &Result) { 186 SeenBlocks.insert(BB); 187 188 // Insert over-defined values into their own cache to reduce memory 189 // overhead. 190 if (Result.isOverdefined()) 191 OverDefinedCache[BB].insert(Val); 192 else { 193 auto It = ValueCache.find_as(Val); 194 if (It == ValueCache.end()) { 195 ValueCache[Val] = std::make_unique<ValueCacheEntryTy>(Val, this); 196 It = ValueCache.find_as(Val); 197 assert(It != ValueCache.end() && "Val was just added to the map!"); 198 } 199 It->second->BlockVals[BB] = Result; 200 } 201 } 202 203 bool isOverdefined(Value *V, BasicBlock *BB) const { 204 auto ODI = OverDefinedCache.find(BB); 205 206 if (ODI == OverDefinedCache.end()) 207 return false; 208 209 return ODI->second.count(V); 210 } 211 212 bool hasCachedValueInfo(Value *V, BasicBlock *BB) const { 213 if (isOverdefined(V, BB)) 214 return true; 215 216 auto I = ValueCache.find_as(V); 217 if (I == ValueCache.end()) 218 return false; 219 220 return I->second->BlockVals.count(BB); 221 } 222 223 ValueLatticeElement getCachedValueInfo(Value *V, BasicBlock *BB) const { 224 if (isOverdefined(V, BB)) 225 return ValueLatticeElement::getOverdefined(); 226 227 auto I = ValueCache.find_as(V); 228 if (I == ValueCache.end()) 229 return ValueLatticeElement(); 230 auto BBI = I->second->BlockVals.find(BB); 231 if (BBI == I->second->BlockVals.end()) 232 return ValueLatticeElement(); 233 return BBI->second; 234 } 235 236 /// clear - Empty the cache. 237 void clear() { 238 SeenBlocks.clear(); 239 ValueCache.clear(); 240 OverDefinedCache.clear(); 241 } 242 243 /// Inform the cache that a given value has been deleted. 244 void eraseValue(Value *V); 245 246 /// This is part of the update interface to inform the cache 247 /// that a block has been deleted. 248 void eraseBlock(BasicBlock *BB); 249 250 /// Updates the cache to remove any influence an overdefined value in 251 /// OldSucc might have (unless also overdefined in NewSucc). This just 252 /// flushes elements from the cache and does not add any. 253 void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc); 254 255 friend struct LVIValueHandle; 256 }; 257 } 258 259 void LazyValueInfoCache::eraseValue(Value *V) { 260 for (auto I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E;) { 261 // Copy and increment the iterator immediately so we can erase behind 262 // ourselves. 263 auto Iter = I++; 264 SmallPtrSetImpl<Value *> &ValueSet = Iter->second; 265 ValueSet.erase(V); 266 if (ValueSet.empty()) 267 OverDefinedCache.erase(Iter); 268 } 269 270 ValueCache.erase(V); 271 } 272 273 void LVIValueHandle::deleted() { 274 // This erasure deallocates *this, so it MUST happen after we're done 275 // using any and all members of *this. 276 Parent->eraseValue(*this); 277 } 278 279 void LazyValueInfoCache::eraseBlock(BasicBlock *BB) { 280 // Shortcut if we have never seen this block. 281 DenseSet<PoisoningVH<BasicBlock> >::iterator I = SeenBlocks.find(BB); 282 if (I == SeenBlocks.end()) 283 return; 284 SeenBlocks.erase(I); 285 286 auto ODI = OverDefinedCache.find(BB); 287 if (ODI != OverDefinedCache.end()) 288 OverDefinedCache.erase(ODI); 289 290 for (auto &I : ValueCache) 291 I.second->BlockVals.erase(BB); 292 } 293 294 void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc, 295 BasicBlock *NewSucc) { 296 // When an edge in the graph has been threaded, values that we could not 297 // determine a value for before (i.e. were marked overdefined) may be 298 // possible to solve now. We do NOT try to proactively update these values. 299 // Instead, we clear their entries from the cache, and allow lazy updating to 300 // recompute them when needed. 301 302 // The updating process is fairly simple: we need to drop cached info 303 // for all values that were marked overdefined in OldSucc, and for those same 304 // values in any successor of OldSucc (except NewSucc) in which they were 305 // also marked overdefined. 306 std::vector<BasicBlock*> worklist; 307 worklist.push_back(OldSucc); 308 309 auto I = OverDefinedCache.find(OldSucc); 310 if (I == OverDefinedCache.end()) 311 return; // Nothing to process here. 312 SmallVector<Value *, 4> ValsToClear(I->second.begin(), I->second.end()); 313 314 // Use a worklist to perform a depth-first search of OldSucc's successors. 315 // NOTE: We do not need a visited list since any blocks we have already 316 // visited will have had their overdefined markers cleared already, and we 317 // thus won't loop to their successors. 318 while (!worklist.empty()) { 319 BasicBlock *ToUpdate = worklist.back(); 320 worklist.pop_back(); 321 322 // Skip blocks only accessible through NewSucc. 323 if (ToUpdate == NewSucc) continue; 324 325 // If a value was marked overdefined in OldSucc, and is here too... 326 auto OI = OverDefinedCache.find(ToUpdate); 327 if (OI == OverDefinedCache.end()) 328 continue; 329 SmallPtrSetImpl<Value *> &ValueSet = OI->second; 330 331 bool changed = false; 332 for (Value *V : ValsToClear) { 333 if (!ValueSet.erase(V)) 334 continue; 335 336 // If we removed anything, then we potentially need to update 337 // blocks successors too. 338 changed = true; 339 340 if (ValueSet.empty()) { 341 OverDefinedCache.erase(OI); 342 break; 343 } 344 } 345 346 if (!changed) continue; 347 348 worklist.insert(worklist.end(), succ_begin(ToUpdate), succ_end(ToUpdate)); 349 } 350 } 351 352 353 namespace { 354 /// An assembly annotator class to print LazyValueCache information in 355 /// comments. 356 class LazyValueInfoImpl; 357 class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter { 358 LazyValueInfoImpl *LVIImpl; 359 // While analyzing which blocks we can solve values for, we need the dominator 360 // information. Since this is an optional parameter in LVI, we require this 361 // DomTreeAnalysis pass in the printer pass, and pass the dominator 362 // tree to the LazyValueInfoAnnotatedWriter. 363 DominatorTree &DT; 364 365 public: 366 LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree) 367 : LVIImpl(L), DT(DTree) {} 368 369 virtual void emitBasicBlockStartAnnot(const BasicBlock *BB, 370 formatted_raw_ostream &OS); 371 372 virtual void emitInstructionAnnot(const Instruction *I, 373 formatted_raw_ostream &OS); 374 }; 375 } 376 namespace { 377 // The actual implementation of the lazy analysis and update. Note that the 378 // inheritance from LazyValueInfoCache is intended to be temporary while 379 // splitting the code and then transitioning to a has-a relationship. 380 class LazyValueInfoImpl { 381 382 /// Cached results from previous queries 383 LazyValueInfoCache TheCache; 384 385 /// This stack holds the state of the value solver during a query. 386 /// It basically emulates the callstack of the naive 387 /// recursive value lookup process. 388 SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack; 389 390 /// Keeps track of which block-value pairs are in BlockValueStack. 391 DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet; 392 393 /// Push BV onto BlockValueStack unless it's already in there. 394 /// Returns true on success. 395 bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) { 396 if (!BlockValueSet.insert(BV).second) 397 return false; // It's already in the stack. 398 399 LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in " 400 << BV.first->getName() << "\n"); 401 BlockValueStack.push_back(BV); 402 return true; 403 } 404 405 AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls. 406 const DataLayout &DL; ///< A mandatory DataLayout 407 DominatorTree *DT; ///< An optional DT pointer. 408 DominatorTree *DisabledDT; ///< Stores DT if it's disabled. 409 410 ValueLatticeElement getBlockValue(Value *Val, BasicBlock *BB); 411 bool getEdgeValue(Value *V, BasicBlock *F, BasicBlock *T, 412 ValueLatticeElement &Result, Instruction *CxtI = nullptr); 413 bool hasBlockValue(Value *Val, BasicBlock *BB); 414 415 // These methods process one work item and may add more. A false value 416 // returned means that the work item was not completely processed and must 417 // be revisited after going through the new items. 418 bool solveBlockValue(Value *Val, BasicBlock *BB); 419 bool solveBlockValueImpl(ValueLatticeElement &Res, Value *Val, 420 BasicBlock *BB); 421 bool solveBlockValueNonLocal(ValueLatticeElement &BBLV, Value *Val, 422 BasicBlock *BB); 423 bool solveBlockValuePHINode(ValueLatticeElement &BBLV, PHINode *PN, 424 BasicBlock *BB); 425 bool solveBlockValueSelect(ValueLatticeElement &BBLV, SelectInst *S, 426 BasicBlock *BB); 427 Optional<ConstantRange> getRangeForOperand(unsigned Op, Instruction *I, 428 BasicBlock *BB); 429 bool solveBlockValueBinaryOpImpl( 430 ValueLatticeElement &BBLV, Instruction *I, BasicBlock *BB, 431 std::function<ConstantRange(const ConstantRange &, 432 const ConstantRange &)> OpFn); 433 bool solveBlockValueBinaryOp(ValueLatticeElement &BBLV, BinaryOperator *BBI, 434 BasicBlock *BB); 435 bool solveBlockValueCast(ValueLatticeElement &BBLV, CastInst *CI, 436 BasicBlock *BB); 437 bool solveBlockValueOverflowIntrinsic( 438 ValueLatticeElement &BBLV, WithOverflowInst *WO, BasicBlock *BB); 439 bool solveBlockValueSaturatingIntrinsic(ValueLatticeElement &BBLV, 440 SaturatingInst *SI, BasicBlock *BB); 441 bool solveBlockValueIntrinsic(ValueLatticeElement &BBLV, IntrinsicInst *II, 442 BasicBlock *BB); 443 bool solveBlockValueExtractValue(ValueLatticeElement &BBLV, 444 ExtractValueInst *EVI, BasicBlock *BB); 445 void intersectAssumeOrGuardBlockValueConstantRange(Value *Val, 446 ValueLatticeElement &BBLV, 447 Instruction *BBI); 448 449 void solve(); 450 451 public: 452 /// This is the query interface to determine the lattice 453 /// value for the specified Value* at the end of the specified block. 454 ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB, 455 Instruction *CxtI = nullptr); 456 457 /// This is the query interface to determine the lattice 458 /// value for the specified Value* at the specified instruction (generally 459 /// from an assume intrinsic). 460 ValueLatticeElement getValueAt(Value *V, Instruction *CxtI); 461 462 /// This is the query interface to determine the lattice 463 /// value for the specified Value* that is true on the specified edge. 464 ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB, 465 BasicBlock *ToBB, 466 Instruction *CxtI = nullptr); 467 468 /// Complete flush all previously computed values 469 void clear() { 470 TheCache.clear(); 471 } 472 473 /// Printing the LazyValueInfo Analysis. 474 void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) { 475 LazyValueInfoAnnotatedWriter Writer(this, DTree); 476 F.print(OS, &Writer); 477 } 478 479 /// This is part of the update interface to inform the cache 480 /// that a block has been deleted. 481 void eraseBlock(BasicBlock *BB) { 482 TheCache.eraseBlock(BB); 483 } 484 485 /// Disables use of the DominatorTree within LVI. 486 void disableDT() { 487 if (DT) { 488 assert(!DisabledDT && "Both DT and DisabledDT are not nullptr!"); 489 std::swap(DT, DisabledDT); 490 } 491 } 492 493 /// Enables use of the DominatorTree within LVI. Does nothing if the class 494 /// instance was initialized without a DT pointer. 495 void enableDT() { 496 if (DisabledDT) { 497 assert(!DT && "Both DT and DisabledDT are not nullptr!"); 498 std::swap(DT, DisabledDT); 499 } 500 } 501 502 /// This is the update interface to inform the cache that an edge from 503 /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc. 504 void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc); 505 506 LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL, 507 DominatorTree *DT = nullptr) 508 : AC(AC), DL(DL), DT(DT), DisabledDT(nullptr) {} 509 }; 510 } // end anonymous namespace 511 512 513 void LazyValueInfoImpl::solve() { 514 SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack( 515 BlockValueStack.begin(), BlockValueStack.end()); 516 517 unsigned processedCount = 0; 518 while (!BlockValueStack.empty()) { 519 processedCount++; 520 // Abort if we have to process too many values to get a result for this one. 521 // Because of the design of the overdefined cache currently being per-block 522 // to avoid naming-related issues (IE it wants to try to give different 523 // results for the same name in different blocks), overdefined results don't 524 // get cached globally, which in turn means we will often try to rediscover 525 // the same overdefined result again and again. Once something like 526 // PredicateInfo is used in LVI or CVP, we should be able to make the 527 // overdefined cache global, and remove this throttle. 528 if (processedCount > MaxProcessedPerValue) { 529 LLVM_DEBUG( 530 dbgs() << "Giving up on stack because we are getting too deep\n"); 531 // Fill in the original values 532 while (!StartingStack.empty()) { 533 std::pair<BasicBlock *, Value *> &e = StartingStack.back(); 534 TheCache.insertResult(e.second, e.first, 535 ValueLatticeElement::getOverdefined()); 536 StartingStack.pop_back(); 537 } 538 BlockValueSet.clear(); 539 BlockValueStack.clear(); 540 return; 541 } 542 std::pair<BasicBlock *, Value *> e = BlockValueStack.back(); 543 assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!"); 544 545 if (solveBlockValue(e.second, e.first)) { 546 // The work item was completely processed. 547 assert(BlockValueStack.back() == e && "Nothing should have been pushed!"); 548 assert(TheCache.hasCachedValueInfo(e.second, e.first) && 549 "Result should be in cache!"); 550 551 LLVM_DEBUG( 552 dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = " 553 << TheCache.getCachedValueInfo(e.second, e.first) << "\n"); 554 555 BlockValueStack.pop_back(); 556 BlockValueSet.erase(e); 557 } else { 558 // More work needs to be done before revisiting. 559 assert(BlockValueStack.back() != e && "Stack should have been pushed!"); 560 } 561 } 562 } 563 564 bool LazyValueInfoImpl::hasBlockValue(Value *Val, BasicBlock *BB) { 565 // If already a constant, there is nothing to compute. 566 if (isa<Constant>(Val)) 567 return true; 568 569 return TheCache.hasCachedValueInfo(Val, BB); 570 } 571 572 ValueLatticeElement LazyValueInfoImpl::getBlockValue(Value *Val, 573 BasicBlock *BB) { 574 // If already a constant, there is nothing to compute. 575 if (Constant *VC = dyn_cast<Constant>(Val)) 576 return ValueLatticeElement::get(VC); 577 578 return TheCache.getCachedValueInfo(Val, BB); 579 } 580 581 static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) { 582 switch (BBI->getOpcode()) { 583 default: break; 584 case Instruction::Load: 585 case Instruction::Call: 586 case Instruction::Invoke: 587 if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range)) 588 if (isa<IntegerType>(BBI->getType())) { 589 return ValueLatticeElement::getRange( 590 getConstantRangeFromMetadata(*Ranges)); 591 } 592 break; 593 }; 594 // Nothing known - will be intersected with other facts 595 return ValueLatticeElement::getOverdefined(); 596 } 597 598 bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) { 599 if (isa<Constant>(Val)) 600 return true; 601 602 if (TheCache.hasCachedValueInfo(Val, BB)) { 603 // If we have a cached value, use that. 604 LLVM_DEBUG(dbgs() << " reuse BB '" << BB->getName() << "' val=" 605 << TheCache.getCachedValueInfo(Val, BB) << '\n'); 606 607 // Since we're reusing a cached value, we don't need to update the 608 // OverDefinedCache. The cache will have been properly updated whenever the 609 // cached value was inserted. 610 return true; 611 } 612 613 // Hold off inserting this value into the Cache in case we have to return 614 // false and come back later. 615 ValueLatticeElement Res; 616 if (!solveBlockValueImpl(Res, Val, BB)) 617 // Work pushed, will revisit 618 return false; 619 620 TheCache.insertResult(Val, BB, Res); 621 return true; 622 } 623 624 bool LazyValueInfoImpl::solveBlockValueImpl(ValueLatticeElement &Res, 625 Value *Val, BasicBlock *BB) { 626 627 Instruction *BBI = dyn_cast<Instruction>(Val); 628 if (!BBI || BBI->getParent() != BB) 629 return solveBlockValueNonLocal(Res, Val, BB); 630 631 if (PHINode *PN = dyn_cast<PHINode>(BBI)) 632 return solveBlockValuePHINode(Res, PN, BB); 633 634 if (auto *SI = dyn_cast<SelectInst>(BBI)) 635 return solveBlockValueSelect(Res, SI, BB); 636 637 // If this value is a nonnull pointer, record it's range and bailout. Note 638 // that for all other pointer typed values, we terminate the search at the 639 // definition. We could easily extend this to look through geps, bitcasts, 640 // and the like to prove non-nullness, but it's not clear that's worth it 641 // compile time wise. The context-insensitive value walk done inside 642 // isKnownNonZero gets most of the profitable cases at much less expense. 643 // This does mean that we have a sensitivity to where the defining 644 // instruction is placed, even if it could legally be hoisted much higher. 645 // That is unfortunate. 646 PointerType *PT = dyn_cast<PointerType>(BBI->getType()); 647 if (PT && isKnownNonZero(BBI, DL)) { 648 Res = ValueLatticeElement::getNot(ConstantPointerNull::get(PT)); 649 return true; 650 } 651 if (BBI->getType()->isIntegerTy()) { 652 if (auto *CI = dyn_cast<CastInst>(BBI)) 653 return solveBlockValueCast(Res, CI, BB); 654 655 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI)) 656 return solveBlockValueBinaryOp(Res, BO, BB); 657 658 if (auto *EVI = dyn_cast<ExtractValueInst>(BBI)) 659 return solveBlockValueExtractValue(Res, EVI, BB); 660 661 if (auto *II = dyn_cast<IntrinsicInst>(BBI)) 662 return solveBlockValueIntrinsic(Res, II, BB); 663 } 664 665 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName() 666 << "' - unknown inst def found.\n"); 667 Res = getFromRangeMetadata(BBI); 668 return true; 669 } 670 671 static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) { 672 if (LoadInst *L = dyn_cast<LoadInst>(I)) { 673 return L->getPointerAddressSpace() == 0 && 674 GetUnderlyingObject(L->getPointerOperand(), 675 L->getModule()->getDataLayout()) == Ptr; 676 } 677 if (StoreInst *S = dyn_cast<StoreInst>(I)) { 678 return S->getPointerAddressSpace() == 0 && 679 GetUnderlyingObject(S->getPointerOperand(), 680 S->getModule()->getDataLayout()) == Ptr; 681 } 682 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) { 683 if (MI->isVolatile()) return false; 684 685 // FIXME: check whether it has a valuerange that excludes zero? 686 ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength()); 687 if (!Len || Len->isZero()) return false; 688 689 if (MI->getDestAddressSpace() == 0) 690 if (GetUnderlyingObject(MI->getRawDest(), 691 MI->getModule()->getDataLayout()) == Ptr) 692 return true; 693 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) 694 if (MTI->getSourceAddressSpace() == 0) 695 if (GetUnderlyingObject(MTI->getRawSource(), 696 MTI->getModule()->getDataLayout()) == Ptr) 697 return true; 698 } 699 return false; 700 } 701 702 /// Return true if the allocation associated with Val is ever dereferenced 703 /// within the given basic block. This establishes the fact Val is not null, 704 /// but does not imply that the memory at Val is dereferenceable. (Val may 705 /// point off the end of the dereferenceable part of the object.) 706 static bool isObjectDereferencedInBlock(Value *Val, BasicBlock *BB) { 707 assert(Val->getType()->isPointerTy()); 708 709 const DataLayout &DL = BB->getModule()->getDataLayout(); 710 Value *UnderlyingVal = GetUnderlyingObject(Val, DL); 711 // If 'GetUnderlyingObject' didn't converge, skip it. It won't converge 712 // inside InstructionDereferencesPointer either. 713 if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, DL, 1)) 714 for (Instruction &I : *BB) 715 if (InstructionDereferencesPointer(&I, UnderlyingVal)) 716 return true; 717 return false; 718 } 719 720 bool LazyValueInfoImpl::solveBlockValueNonLocal(ValueLatticeElement &BBLV, 721 Value *Val, BasicBlock *BB) { 722 ValueLatticeElement Result; // Start Undefined. 723 724 // If this is the entry block, we must be asking about an argument. The 725 // value is overdefined. 726 if (BB == &BB->getParent()->getEntryBlock()) { 727 assert(isa<Argument>(Val) && "Unknown live-in to the entry block"); 728 // Before giving up, see if we can prove the pointer non-null local to 729 // this particular block. 730 PointerType *PTy = dyn_cast<PointerType>(Val->getType()); 731 if (PTy && 732 (isKnownNonZero(Val, DL) || 733 (isObjectDereferencedInBlock(Val, BB) && 734 !NullPointerIsDefined(BB->getParent(), PTy->getAddressSpace())))) { 735 Result = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy)); 736 } else { 737 Result = ValueLatticeElement::getOverdefined(); 738 } 739 BBLV = Result; 740 return true; 741 } 742 743 // Loop over all of our predecessors, merging what we know from them into 744 // result. If we encounter an unexplored predecessor, we eagerly explore it 745 // in a depth first manner. In practice, this has the effect of discovering 746 // paths we can't analyze eagerly without spending compile times analyzing 747 // other paths. This heuristic benefits from the fact that predecessors are 748 // frequently arranged such that dominating ones come first and we quickly 749 // find a path to function entry. TODO: We should consider explicitly 750 // canonicalizing to make this true rather than relying on this happy 751 // accident. 752 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) { 753 ValueLatticeElement EdgeResult; 754 if (!getEdgeValue(Val, *PI, BB, EdgeResult)) 755 // Explore that input, then return here 756 return false; 757 758 Result.mergeIn(EdgeResult, DL); 759 760 // If we hit overdefined, exit early. The BlockVals entry is already set 761 // to overdefined. 762 if (Result.isOverdefined()) { 763 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName() 764 << "' - overdefined because of pred (non local).\n"); 765 // Before giving up, see if we can prove the pointer non-null local to 766 // this particular block. 767 PointerType *PTy = dyn_cast<PointerType>(Val->getType()); 768 if (PTy && isObjectDereferencedInBlock(Val, BB) && 769 !NullPointerIsDefined(BB->getParent(), PTy->getAddressSpace())) { 770 Result = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy)); 771 } 772 773 BBLV = Result; 774 return true; 775 } 776 } 777 778 // Return the merged value, which is more precise than 'overdefined'. 779 assert(!Result.isOverdefined()); 780 BBLV = Result; 781 return true; 782 } 783 784 bool LazyValueInfoImpl::solveBlockValuePHINode(ValueLatticeElement &BBLV, 785 PHINode *PN, BasicBlock *BB) { 786 ValueLatticeElement Result; // Start Undefined. 787 788 // Loop over all of our predecessors, merging what we know from them into 789 // result. See the comment about the chosen traversal order in 790 // solveBlockValueNonLocal; the same reasoning applies here. 791 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 792 BasicBlock *PhiBB = PN->getIncomingBlock(i); 793 Value *PhiVal = PN->getIncomingValue(i); 794 ValueLatticeElement EdgeResult; 795 // Note that we can provide PN as the context value to getEdgeValue, even 796 // though the results will be cached, because PN is the value being used as 797 // the cache key in the caller. 798 if (!getEdgeValue(PhiVal, PhiBB, BB, EdgeResult, PN)) 799 // Explore that input, then return here 800 return false; 801 802 Result.mergeIn(EdgeResult, DL); 803 804 // If we hit overdefined, exit early. The BlockVals entry is already set 805 // to overdefined. 806 if (Result.isOverdefined()) { 807 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName() 808 << "' - overdefined because of pred (local).\n"); 809 810 BBLV = Result; 811 return true; 812 } 813 } 814 815 // Return the merged value, which is more precise than 'overdefined'. 816 assert(!Result.isOverdefined() && "Possible PHI in entry block?"); 817 BBLV = Result; 818 return true; 819 } 820 821 static ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond, 822 bool isTrueDest = true); 823 824 // If we can determine a constraint on the value given conditions assumed by 825 // the program, intersect those constraints with BBLV 826 void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange( 827 Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) { 828 BBI = BBI ? BBI : dyn_cast<Instruction>(Val); 829 if (!BBI) 830 return; 831 832 for (auto &AssumeVH : AC->assumptionsFor(Val)) { 833 if (!AssumeVH) 834 continue; 835 auto *I = cast<CallInst>(AssumeVH); 836 if (!isValidAssumeForContext(I, BBI, DT)) 837 continue; 838 839 BBLV = intersect(BBLV, getValueFromCondition(Val, I->getArgOperand(0))); 840 } 841 842 // If guards are not used in the module, don't spend time looking for them 843 auto *GuardDecl = BBI->getModule()->getFunction( 844 Intrinsic::getName(Intrinsic::experimental_guard)); 845 if (!GuardDecl || GuardDecl->use_empty()) 846 return; 847 848 if (BBI->getIterator() == BBI->getParent()->begin()) 849 return; 850 for (Instruction &I : make_range(std::next(BBI->getIterator().getReverse()), 851 BBI->getParent()->rend())) { 852 Value *Cond = nullptr; 853 if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond)))) 854 BBLV = intersect(BBLV, getValueFromCondition(Val, Cond)); 855 } 856 } 857 858 bool LazyValueInfoImpl::solveBlockValueSelect(ValueLatticeElement &BBLV, 859 SelectInst *SI, BasicBlock *BB) { 860 861 // Recurse on our inputs if needed 862 if (!hasBlockValue(SI->getTrueValue(), BB)) { 863 if (pushBlockValue(std::make_pair(BB, SI->getTrueValue()))) 864 return false; 865 BBLV = ValueLatticeElement::getOverdefined(); 866 return true; 867 } 868 ValueLatticeElement TrueVal = getBlockValue(SI->getTrueValue(), BB); 869 // If we hit overdefined, don't ask more queries. We want to avoid poisoning 870 // extra slots in the table if we can. 871 if (TrueVal.isOverdefined()) { 872 BBLV = ValueLatticeElement::getOverdefined(); 873 return true; 874 } 875 876 if (!hasBlockValue(SI->getFalseValue(), BB)) { 877 if (pushBlockValue(std::make_pair(BB, SI->getFalseValue()))) 878 return false; 879 BBLV = ValueLatticeElement::getOverdefined(); 880 return true; 881 } 882 ValueLatticeElement FalseVal = getBlockValue(SI->getFalseValue(), BB); 883 // If we hit overdefined, don't ask more queries. We want to avoid poisoning 884 // extra slots in the table if we can. 885 if (FalseVal.isOverdefined()) { 886 BBLV = ValueLatticeElement::getOverdefined(); 887 return true; 888 } 889 890 if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) { 891 const ConstantRange &TrueCR = TrueVal.getConstantRange(); 892 const ConstantRange &FalseCR = FalseVal.getConstantRange(); 893 Value *LHS = nullptr; 894 Value *RHS = nullptr; 895 SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS); 896 // Is this a min specifically of our two inputs? (Avoid the risk of 897 // ValueTracking getting smarter looking back past our immediate inputs.) 898 if (SelectPatternResult::isMinOrMax(SPR.Flavor) && 899 LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) { 900 ConstantRange ResultCR = [&]() { 901 switch (SPR.Flavor) { 902 default: 903 llvm_unreachable("unexpected minmax type!"); 904 case SPF_SMIN: /// Signed minimum 905 return TrueCR.smin(FalseCR); 906 case SPF_UMIN: /// Unsigned minimum 907 return TrueCR.umin(FalseCR); 908 case SPF_SMAX: /// Signed maximum 909 return TrueCR.smax(FalseCR); 910 case SPF_UMAX: /// Unsigned maximum 911 return TrueCR.umax(FalseCR); 912 }; 913 }(); 914 BBLV = ValueLatticeElement::getRange(ResultCR); 915 return true; 916 } 917 918 if (SPR.Flavor == SPF_ABS) { 919 if (LHS == SI->getTrueValue()) { 920 BBLV = ValueLatticeElement::getRange(TrueCR.abs()); 921 return true; 922 } 923 if (LHS == SI->getFalseValue()) { 924 BBLV = ValueLatticeElement::getRange(FalseCR.abs()); 925 return true; 926 } 927 } 928 929 if (SPR.Flavor == SPF_NABS) { 930 ConstantRange Zero(APInt::getNullValue(TrueCR.getBitWidth())); 931 if (LHS == SI->getTrueValue()) { 932 BBLV = ValueLatticeElement::getRange(Zero.sub(TrueCR.abs())); 933 return true; 934 } 935 if (LHS == SI->getFalseValue()) { 936 BBLV = ValueLatticeElement::getRange(Zero.sub(FalseCR.abs())); 937 return true; 938 } 939 } 940 } 941 942 // Can we constrain the facts about the true and false values by using the 943 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5). 944 // TODO: We could potentially refine an overdefined true value above. 945 Value *Cond = SI->getCondition(); 946 TrueVal = intersect(TrueVal, 947 getValueFromCondition(SI->getTrueValue(), Cond, true)); 948 FalseVal = intersect(FalseVal, 949 getValueFromCondition(SI->getFalseValue(), Cond, false)); 950 951 // Handle clamp idioms such as: 952 // %24 = constantrange<0, 17> 953 // %39 = icmp eq i32 %24, 0 954 // %40 = add i32 %24, -1 955 // %siv.next = select i1 %39, i32 16, i32 %40 956 // %siv.next = constantrange<0, 17> not <-1, 17> 957 // In general, this can handle any clamp idiom which tests the edge 958 // condition via an equality or inequality. 959 if (auto *ICI = dyn_cast<ICmpInst>(Cond)) { 960 ICmpInst::Predicate Pred = ICI->getPredicate(); 961 Value *A = ICI->getOperand(0); 962 if (ConstantInt *CIBase = dyn_cast<ConstantInt>(ICI->getOperand(1))) { 963 auto addConstants = [](ConstantInt *A, ConstantInt *B) { 964 assert(A->getType() == B->getType()); 965 return ConstantInt::get(A->getType(), A->getValue() + B->getValue()); 966 }; 967 // See if either input is A + C2, subject to the constraint from the 968 // condition that A != C when that input is used. We can assume that 969 // that input doesn't include C + C2. 970 ConstantInt *CIAdded; 971 switch (Pred) { 972 default: break; 973 case ICmpInst::ICMP_EQ: 974 if (match(SI->getFalseValue(), m_Add(m_Specific(A), 975 m_ConstantInt(CIAdded)))) { 976 auto ResNot = addConstants(CIBase, CIAdded); 977 FalseVal = intersect(FalseVal, 978 ValueLatticeElement::getNot(ResNot)); 979 } 980 break; 981 case ICmpInst::ICMP_NE: 982 if (match(SI->getTrueValue(), m_Add(m_Specific(A), 983 m_ConstantInt(CIAdded)))) { 984 auto ResNot = addConstants(CIBase, CIAdded); 985 TrueVal = intersect(TrueVal, 986 ValueLatticeElement::getNot(ResNot)); 987 } 988 break; 989 }; 990 } 991 } 992 993 ValueLatticeElement Result; // Start Undefined. 994 Result.mergeIn(TrueVal, DL); 995 Result.mergeIn(FalseVal, DL); 996 BBLV = Result; 997 return true; 998 } 999 1000 Optional<ConstantRange> LazyValueInfoImpl::getRangeForOperand(unsigned Op, 1001 Instruction *I, 1002 BasicBlock *BB) { 1003 if (!hasBlockValue(I->getOperand(Op), BB)) 1004 if (pushBlockValue(std::make_pair(BB, I->getOperand(Op)))) 1005 return None; 1006 1007 const unsigned OperandBitWidth = 1008 DL.getTypeSizeInBits(I->getOperand(Op)->getType()); 1009 ConstantRange Range = ConstantRange::getFull(OperandBitWidth); 1010 if (hasBlockValue(I->getOperand(Op), BB)) { 1011 ValueLatticeElement Val = getBlockValue(I->getOperand(Op), BB); 1012 intersectAssumeOrGuardBlockValueConstantRange(I->getOperand(Op), Val, I); 1013 if (Val.isConstantRange()) 1014 Range = Val.getConstantRange(); 1015 } 1016 return Range; 1017 } 1018 1019 bool LazyValueInfoImpl::solveBlockValueCast(ValueLatticeElement &BBLV, 1020 CastInst *CI, 1021 BasicBlock *BB) { 1022 if (!CI->getOperand(0)->getType()->isSized()) { 1023 // Without knowing how wide the input is, we can't analyze it in any useful 1024 // way. 1025 BBLV = ValueLatticeElement::getOverdefined(); 1026 return true; 1027 } 1028 1029 // Filter out casts we don't know how to reason about before attempting to 1030 // recurse on our operand. This can cut a long search short if we know we're 1031 // not going to be able to get any useful information anways. 1032 switch (CI->getOpcode()) { 1033 case Instruction::Trunc: 1034 case Instruction::SExt: 1035 case Instruction::ZExt: 1036 case Instruction::BitCast: 1037 break; 1038 default: 1039 // Unhandled instructions are overdefined. 1040 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName() 1041 << "' - overdefined (unknown cast).\n"); 1042 BBLV = ValueLatticeElement::getOverdefined(); 1043 return true; 1044 } 1045 1046 // Figure out the range of the LHS. If that fails, we still apply the 1047 // transfer rule on the full set since we may be able to locally infer 1048 // interesting facts. 1049 Optional<ConstantRange> LHSRes = getRangeForOperand(0, CI, BB); 1050 if (!LHSRes.hasValue()) 1051 // More work to do before applying this transfer rule. 1052 return false; 1053 ConstantRange LHSRange = LHSRes.getValue(); 1054 1055 const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth(); 1056 1057 // NOTE: We're currently limited by the set of operations that ConstantRange 1058 // can evaluate symbolically. Enhancing that set will allows us to analyze 1059 // more definitions. 1060 BBLV = ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(), 1061 ResultBitWidth)); 1062 return true; 1063 } 1064 1065 bool LazyValueInfoImpl::solveBlockValueBinaryOpImpl( 1066 ValueLatticeElement &BBLV, Instruction *I, BasicBlock *BB, 1067 std::function<ConstantRange(const ConstantRange &, 1068 const ConstantRange &)> OpFn) { 1069 // Figure out the ranges of the operands. If that fails, use a 1070 // conservative range, but apply the transfer rule anyways. This 1071 // lets us pick up facts from expressions like "and i32 (call i32 1072 // @foo()), 32" 1073 Optional<ConstantRange> LHSRes = getRangeForOperand(0, I, BB); 1074 Optional<ConstantRange> RHSRes = getRangeForOperand(1, I, BB); 1075 if (!LHSRes.hasValue() || !RHSRes.hasValue()) 1076 // More work to do before applying this transfer rule. 1077 return false; 1078 1079 ConstantRange LHSRange = LHSRes.getValue(); 1080 ConstantRange RHSRange = RHSRes.getValue(); 1081 BBLV = ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange)); 1082 return true; 1083 } 1084 1085 bool LazyValueInfoImpl::solveBlockValueBinaryOp(ValueLatticeElement &BBLV, 1086 BinaryOperator *BO, 1087 BasicBlock *BB) { 1088 1089 assert(BO->getOperand(0)->getType()->isSized() && 1090 "all operands to binary operators are sized"); 1091 if (BO->getOpcode() == Instruction::Xor) { 1092 // Xor is the only operation not supported by ConstantRange::binaryOp(). 1093 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName() 1094 << "' - overdefined (unknown binary operator).\n"); 1095 BBLV = ValueLatticeElement::getOverdefined(); 1096 return true; 1097 } 1098 1099 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) { 1100 unsigned NoWrapKind = 0; 1101 if (OBO->hasNoUnsignedWrap()) 1102 NoWrapKind |= OverflowingBinaryOperator::NoUnsignedWrap; 1103 if (OBO->hasNoSignedWrap()) 1104 NoWrapKind |= OverflowingBinaryOperator::NoSignedWrap; 1105 1106 return solveBlockValueBinaryOpImpl( 1107 BBLV, BO, BB, 1108 [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) { 1109 return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind); 1110 }); 1111 } 1112 1113 return solveBlockValueBinaryOpImpl( 1114 BBLV, BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) { 1115 return CR1.binaryOp(BO->getOpcode(), CR2); 1116 }); 1117 } 1118 1119 bool LazyValueInfoImpl::solveBlockValueOverflowIntrinsic( 1120 ValueLatticeElement &BBLV, WithOverflowInst *WO, BasicBlock *BB) { 1121 return solveBlockValueBinaryOpImpl(BBLV, WO, BB, 1122 [WO](const ConstantRange &CR1, const ConstantRange &CR2) { 1123 return CR1.binaryOp(WO->getBinaryOp(), CR2); 1124 }); 1125 } 1126 1127 bool LazyValueInfoImpl::solveBlockValueSaturatingIntrinsic( 1128 ValueLatticeElement &BBLV, SaturatingInst *SI, BasicBlock *BB) { 1129 switch (SI->getIntrinsicID()) { 1130 case Intrinsic::uadd_sat: 1131 return solveBlockValueBinaryOpImpl( 1132 BBLV, SI, BB, [](const ConstantRange &CR1, const ConstantRange &CR2) { 1133 return CR1.uadd_sat(CR2); 1134 }); 1135 case Intrinsic::usub_sat: 1136 return solveBlockValueBinaryOpImpl( 1137 BBLV, SI, BB, [](const ConstantRange &CR1, const ConstantRange &CR2) { 1138 return CR1.usub_sat(CR2); 1139 }); 1140 case Intrinsic::sadd_sat: 1141 return solveBlockValueBinaryOpImpl( 1142 BBLV, SI, BB, [](const ConstantRange &CR1, const ConstantRange &CR2) { 1143 return CR1.sadd_sat(CR2); 1144 }); 1145 case Intrinsic::ssub_sat: 1146 return solveBlockValueBinaryOpImpl( 1147 BBLV, SI, BB, [](const ConstantRange &CR1, const ConstantRange &CR2) { 1148 return CR1.ssub_sat(CR2); 1149 }); 1150 default: 1151 llvm_unreachable("All llvm.sat intrinsic are handled."); 1152 } 1153 } 1154 1155 bool LazyValueInfoImpl::solveBlockValueIntrinsic(ValueLatticeElement &BBLV, 1156 IntrinsicInst *II, 1157 BasicBlock *BB) { 1158 if (auto *SI = dyn_cast<SaturatingInst>(II)) 1159 return solveBlockValueSaturatingIntrinsic(BBLV, SI, BB); 1160 1161 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName() 1162 << "' - overdefined (unknown intrinsic).\n"); 1163 BBLV = ValueLatticeElement::getOverdefined(); 1164 return true; 1165 } 1166 1167 bool LazyValueInfoImpl::solveBlockValueExtractValue( 1168 ValueLatticeElement &BBLV, ExtractValueInst *EVI, BasicBlock *BB) { 1169 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand())) 1170 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0) 1171 return solveBlockValueOverflowIntrinsic(BBLV, WO, BB); 1172 1173 // Handle extractvalue of insertvalue to allow further simplification 1174 // based on replaced with.overflow intrinsics. 1175 if (Value *V = SimplifyExtractValueInst( 1176 EVI->getAggregateOperand(), EVI->getIndices(), 1177 EVI->getModule()->getDataLayout())) { 1178 if (!hasBlockValue(V, BB)) { 1179 if (pushBlockValue({ BB, V })) 1180 return false; 1181 BBLV = ValueLatticeElement::getOverdefined(); 1182 return true; 1183 } 1184 BBLV = getBlockValue(V, BB); 1185 return true; 1186 } 1187 1188 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName() 1189 << "' - overdefined (unknown extractvalue).\n"); 1190 BBLV = ValueLatticeElement::getOverdefined(); 1191 return true; 1192 } 1193 1194 static ValueLatticeElement getValueFromICmpCondition(Value *Val, ICmpInst *ICI, 1195 bool isTrueDest) { 1196 Value *LHS = ICI->getOperand(0); 1197 Value *RHS = ICI->getOperand(1); 1198 CmpInst::Predicate Predicate = ICI->getPredicate(); 1199 1200 if (isa<Constant>(RHS)) { 1201 if (ICI->isEquality() && LHS == Val) { 1202 // We know that V has the RHS constant if this is a true SETEQ or 1203 // false SETNE. 1204 if (isTrueDest == (Predicate == ICmpInst::ICMP_EQ)) 1205 return ValueLatticeElement::get(cast<Constant>(RHS)); 1206 else if (!isa<UndefValue>(RHS)) 1207 return ValueLatticeElement::getNot(cast<Constant>(RHS)); 1208 } 1209 } 1210 1211 if (!Val->getType()->isIntegerTy()) 1212 return ValueLatticeElement::getOverdefined(); 1213 1214 // Use ConstantRange::makeAllowedICmpRegion in order to determine the possible 1215 // range of Val guaranteed by the condition. Recognize comparisons in the from 1216 // of: 1217 // icmp <pred> Val, ... 1218 // icmp <pred> (add Val, Offset), ... 1219 // The latter is the range checking idiom that InstCombine produces. Subtract 1220 // the offset from the allowed range for RHS in this case. 1221 1222 // Val or (add Val, Offset) can be on either hand of the comparison 1223 if (LHS != Val && !match(LHS, m_Add(m_Specific(Val), m_ConstantInt()))) { 1224 std::swap(LHS, RHS); 1225 Predicate = CmpInst::getSwappedPredicate(Predicate); 1226 } 1227 1228 ConstantInt *Offset = nullptr; 1229 if (LHS != Val) 1230 match(LHS, m_Add(m_Specific(Val), m_ConstantInt(Offset))); 1231 1232 if (LHS == Val || Offset) { 1233 // Calculate the range of values that are allowed by the comparison 1234 ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(), 1235 /*isFullSet=*/true); 1236 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) 1237 RHSRange = ConstantRange(CI->getValue()); 1238 else if (Instruction *I = dyn_cast<Instruction>(RHS)) 1239 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range)) 1240 RHSRange = getConstantRangeFromMetadata(*Ranges); 1241 1242 // If we're interested in the false dest, invert the condition 1243 CmpInst::Predicate Pred = 1244 isTrueDest ? Predicate : CmpInst::getInversePredicate(Predicate); 1245 ConstantRange TrueValues = 1246 ConstantRange::makeAllowedICmpRegion(Pred, RHSRange); 1247 1248 if (Offset) // Apply the offset from above. 1249 TrueValues = TrueValues.subtract(Offset->getValue()); 1250 1251 return ValueLatticeElement::getRange(std::move(TrueValues)); 1252 } 1253 1254 return ValueLatticeElement::getOverdefined(); 1255 } 1256 1257 // Handle conditions of the form 1258 // extractvalue(op.with.overflow(%x, C), 1). 1259 static ValueLatticeElement getValueFromOverflowCondition( 1260 Value *Val, WithOverflowInst *WO, bool IsTrueDest) { 1261 // TODO: This only works with a constant RHS for now. We could also compute 1262 // the range of the RHS, but this doesn't fit into the current structure of 1263 // the edge value calculation. 1264 const APInt *C; 1265 if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C))) 1266 return ValueLatticeElement::getOverdefined(); 1267 1268 // Calculate the possible values of %x for which no overflow occurs. 1269 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion( 1270 WO->getBinaryOp(), *C, WO->getNoWrapKind()); 1271 1272 // If overflow is false, %x is constrained to NWR. If overflow is true, %x is 1273 // constrained to it's inverse (all values that might cause overflow). 1274 if (IsTrueDest) 1275 NWR = NWR.inverse(); 1276 return ValueLatticeElement::getRange(NWR); 1277 } 1278 1279 static ValueLatticeElement 1280 getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest, 1281 DenseMap<Value*, ValueLatticeElement> &Visited); 1282 1283 static ValueLatticeElement 1284 getValueFromConditionImpl(Value *Val, Value *Cond, bool isTrueDest, 1285 DenseMap<Value*, ValueLatticeElement> &Visited) { 1286 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond)) 1287 return getValueFromICmpCondition(Val, ICI, isTrueDest); 1288 1289 if (auto *EVI = dyn_cast<ExtractValueInst>(Cond)) 1290 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand())) 1291 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1) 1292 return getValueFromOverflowCondition(Val, WO, isTrueDest); 1293 1294 // Handle conditions in the form of (cond1 && cond2), we know that on the 1295 // true dest path both of the conditions hold. Similarly for conditions of 1296 // the form (cond1 || cond2), we know that on the false dest path neither 1297 // condition holds. 1298 BinaryOperator *BO = dyn_cast<BinaryOperator>(Cond); 1299 if (!BO || (isTrueDest && BO->getOpcode() != BinaryOperator::And) || 1300 (!isTrueDest && BO->getOpcode() != BinaryOperator::Or)) 1301 return ValueLatticeElement::getOverdefined(); 1302 1303 // Prevent infinite recursion if Cond references itself as in this example: 1304 // Cond: "%tmp4 = and i1 %tmp4, undef" 1305 // BL: "%tmp4 = and i1 %tmp4, undef" 1306 // BR: "i1 undef" 1307 Value *BL = BO->getOperand(0); 1308 Value *BR = BO->getOperand(1); 1309 if (BL == Cond || BR == Cond) 1310 return ValueLatticeElement::getOverdefined(); 1311 1312 return intersect(getValueFromCondition(Val, BL, isTrueDest, Visited), 1313 getValueFromCondition(Val, BR, isTrueDest, Visited)); 1314 } 1315 1316 static ValueLatticeElement 1317 getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest, 1318 DenseMap<Value*, ValueLatticeElement> &Visited) { 1319 auto I = Visited.find(Cond); 1320 if (I != Visited.end()) 1321 return I->second; 1322 1323 auto Result = getValueFromConditionImpl(Val, Cond, isTrueDest, Visited); 1324 Visited[Cond] = Result; 1325 return Result; 1326 } 1327 1328 ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond, 1329 bool isTrueDest) { 1330 assert(Cond && "precondition"); 1331 DenseMap<Value*, ValueLatticeElement> Visited; 1332 return getValueFromCondition(Val, Cond, isTrueDest, Visited); 1333 } 1334 1335 // Return true if Usr has Op as an operand, otherwise false. 1336 static bool usesOperand(User *Usr, Value *Op) { 1337 return find(Usr->operands(), Op) != Usr->op_end(); 1338 } 1339 1340 // Return true if the instruction type of Val is supported by 1341 // constantFoldUser(). Currently CastInst and BinaryOperator only. Call this 1342 // before calling constantFoldUser() to find out if it's even worth attempting 1343 // to call it. 1344 static bool isOperationFoldable(User *Usr) { 1345 return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr); 1346 } 1347 1348 // Check if Usr can be simplified to an integer constant when the value of one 1349 // of its operands Op is an integer constant OpConstVal. If so, return it as an 1350 // lattice value range with a single element or otherwise return an overdefined 1351 // lattice value. 1352 static ValueLatticeElement constantFoldUser(User *Usr, Value *Op, 1353 const APInt &OpConstVal, 1354 const DataLayout &DL) { 1355 assert(isOperationFoldable(Usr) && "Precondition"); 1356 Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal); 1357 // Check if Usr can be simplified to a constant. 1358 if (auto *CI = dyn_cast<CastInst>(Usr)) { 1359 assert(CI->getOperand(0) == Op && "Operand 0 isn't Op"); 1360 if (auto *C = dyn_cast_or_null<ConstantInt>( 1361 SimplifyCastInst(CI->getOpcode(), OpConst, 1362 CI->getDestTy(), DL))) { 1363 return ValueLatticeElement::getRange(ConstantRange(C->getValue())); 1364 } 1365 } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) { 1366 bool Op0Match = BO->getOperand(0) == Op; 1367 bool Op1Match = BO->getOperand(1) == Op; 1368 assert((Op0Match || Op1Match) && 1369 "Operand 0 nor Operand 1 isn't a match"); 1370 Value *LHS = Op0Match ? OpConst : BO->getOperand(0); 1371 Value *RHS = Op1Match ? OpConst : BO->getOperand(1); 1372 if (auto *C = dyn_cast_or_null<ConstantInt>( 1373 SimplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) { 1374 return ValueLatticeElement::getRange(ConstantRange(C->getValue())); 1375 } 1376 } 1377 return ValueLatticeElement::getOverdefined(); 1378 } 1379 1380 /// Compute the value of Val on the edge BBFrom -> BBTo. Returns false if 1381 /// Val is not constrained on the edge. Result is unspecified if return value 1382 /// is false. 1383 static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom, 1384 BasicBlock *BBTo, ValueLatticeElement &Result) { 1385 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we 1386 // know that v != 0. 1387 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) { 1388 // If this is a conditional branch and only one successor goes to BBTo, then 1389 // we may be able to infer something from the condition. 1390 if (BI->isConditional() && 1391 BI->getSuccessor(0) != BI->getSuccessor(1)) { 1392 bool isTrueDest = BI->getSuccessor(0) == BBTo; 1393 assert(BI->getSuccessor(!isTrueDest) == BBTo && 1394 "BBTo isn't a successor of BBFrom"); 1395 Value *Condition = BI->getCondition(); 1396 1397 // If V is the condition of the branch itself, then we know exactly what 1398 // it is. 1399 if (Condition == Val) { 1400 Result = ValueLatticeElement::get(ConstantInt::get( 1401 Type::getInt1Ty(Val->getContext()), isTrueDest)); 1402 return true; 1403 } 1404 1405 // If the condition of the branch is an equality comparison, we may be 1406 // able to infer the value. 1407 Result = getValueFromCondition(Val, Condition, isTrueDest); 1408 if (!Result.isOverdefined()) 1409 return true; 1410 1411 if (User *Usr = dyn_cast<User>(Val)) { 1412 assert(Result.isOverdefined() && "Result isn't overdefined"); 1413 // Check with isOperationFoldable() first to avoid linearly iterating 1414 // over the operands unnecessarily which can be expensive for 1415 // instructions with many operands. 1416 if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) { 1417 const DataLayout &DL = BBTo->getModule()->getDataLayout(); 1418 if (usesOperand(Usr, Condition)) { 1419 // If Val has Condition as an operand and Val can be folded into a 1420 // constant with either Condition == true or Condition == false, 1421 // propagate the constant. 1422 // eg. 1423 // ; %Val is true on the edge to %then. 1424 // %Val = and i1 %Condition, true. 1425 // br %Condition, label %then, label %else 1426 APInt ConditionVal(1, isTrueDest ? 1 : 0); 1427 Result = constantFoldUser(Usr, Condition, ConditionVal, DL); 1428 } else { 1429 // If one of Val's operand has an inferred value, we may be able to 1430 // infer the value of Val. 1431 // eg. 1432 // ; %Val is 94 on the edge to %then. 1433 // %Val = add i8 %Op, 1 1434 // %Condition = icmp eq i8 %Op, 93 1435 // br i1 %Condition, label %then, label %else 1436 for (unsigned i = 0; i < Usr->getNumOperands(); ++i) { 1437 Value *Op = Usr->getOperand(i); 1438 ValueLatticeElement OpLatticeVal = 1439 getValueFromCondition(Op, Condition, isTrueDest); 1440 if (Optional<APInt> OpConst = OpLatticeVal.asConstantInteger()) { 1441 Result = constantFoldUser(Usr, Op, OpConst.getValue(), DL); 1442 break; 1443 } 1444 } 1445 } 1446 } 1447 } 1448 if (!Result.isOverdefined()) 1449 return true; 1450 } 1451 } 1452 1453 // If the edge was formed by a switch on the value, then we may know exactly 1454 // what it is. 1455 if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) { 1456 Value *Condition = SI->getCondition(); 1457 if (!isa<IntegerType>(Val->getType())) 1458 return false; 1459 bool ValUsesConditionAndMayBeFoldable = false; 1460 if (Condition != Val) { 1461 // Check if Val has Condition as an operand. 1462 if (User *Usr = dyn_cast<User>(Val)) 1463 ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) && 1464 usesOperand(Usr, Condition); 1465 if (!ValUsesConditionAndMayBeFoldable) 1466 return false; 1467 } 1468 assert((Condition == Val || ValUsesConditionAndMayBeFoldable) && 1469 "Condition != Val nor Val doesn't use Condition"); 1470 1471 bool DefaultCase = SI->getDefaultDest() == BBTo; 1472 unsigned BitWidth = Val->getType()->getIntegerBitWidth(); 1473 ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/); 1474 1475 for (auto Case : SI->cases()) { 1476 APInt CaseValue = Case.getCaseValue()->getValue(); 1477 ConstantRange EdgeVal(CaseValue); 1478 if (ValUsesConditionAndMayBeFoldable) { 1479 User *Usr = cast<User>(Val); 1480 const DataLayout &DL = BBTo->getModule()->getDataLayout(); 1481 ValueLatticeElement EdgeLatticeVal = 1482 constantFoldUser(Usr, Condition, CaseValue, DL); 1483 if (EdgeLatticeVal.isOverdefined()) 1484 return false; 1485 EdgeVal = EdgeLatticeVal.getConstantRange(); 1486 } 1487 if (DefaultCase) { 1488 // It is possible that the default destination is the destination of 1489 // some cases. We cannot perform difference for those cases. 1490 // We know Condition != CaseValue in BBTo. In some cases we can use 1491 // this to infer Val == f(Condition) is != f(CaseValue). For now, we 1492 // only do this when f is identity (i.e. Val == Condition), but we 1493 // should be able to do this for any injective f. 1494 if (Case.getCaseSuccessor() != BBTo && Condition == Val) 1495 EdgesVals = EdgesVals.difference(EdgeVal); 1496 } else if (Case.getCaseSuccessor() == BBTo) 1497 EdgesVals = EdgesVals.unionWith(EdgeVal); 1498 } 1499 Result = ValueLatticeElement::getRange(std::move(EdgesVals)); 1500 return true; 1501 } 1502 return false; 1503 } 1504 1505 /// Compute the value of Val on the edge BBFrom -> BBTo or the value at 1506 /// the basic block if the edge does not constrain Val. 1507 bool LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom, 1508 BasicBlock *BBTo, 1509 ValueLatticeElement &Result, 1510 Instruction *CxtI) { 1511 // If already a constant, there is nothing to compute. 1512 if (Constant *VC = dyn_cast<Constant>(Val)) { 1513 Result = ValueLatticeElement::get(VC); 1514 return true; 1515 } 1516 1517 ValueLatticeElement LocalResult; 1518 if (!getEdgeValueLocal(Val, BBFrom, BBTo, LocalResult)) 1519 // If we couldn't constrain the value on the edge, LocalResult doesn't 1520 // provide any information. 1521 LocalResult = ValueLatticeElement::getOverdefined(); 1522 1523 if (hasSingleValue(LocalResult)) { 1524 // Can't get any more precise here 1525 Result = LocalResult; 1526 return true; 1527 } 1528 1529 if (!hasBlockValue(Val, BBFrom)) { 1530 if (pushBlockValue(std::make_pair(BBFrom, Val))) 1531 return false; 1532 // No new information. 1533 Result = LocalResult; 1534 return true; 1535 } 1536 1537 // Try to intersect ranges of the BB and the constraint on the edge. 1538 ValueLatticeElement InBlock = getBlockValue(Val, BBFrom); 1539 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, 1540 BBFrom->getTerminator()); 1541 // We can use the context instruction (generically the ultimate instruction 1542 // the calling pass is trying to simplify) here, even though the result of 1543 // this function is generally cached when called from the solve* functions 1544 // (and that cached result might be used with queries using a different 1545 // context instruction), because when this function is called from the solve* 1546 // functions, the context instruction is not provided. When called from 1547 // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided, 1548 // but then the result is not cached. 1549 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI); 1550 1551 Result = intersect(LocalResult, InBlock); 1552 return true; 1553 } 1554 1555 ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB, 1556 Instruction *CxtI) { 1557 LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '" 1558 << BB->getName() << "'\n"); 1559 1560 assert(BlockValueStack.empty() && BlockValueSet.empty()); 1561 if (!hasBlockValue(V, BB)) { 1562 pushBlockValue(std::make_pair(BB, V)); 1563 solve(); 1564 } 1565 ValueLatticeElement Result = getBlockValue(V, BB); 1566 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI); 1567 1568 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n"); 1569 return Result; 1570 } 1571 1572 ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) { 1573 LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName() 1574 << "'\n"); 1575 1576 if (auto *C = dyn_cast<Constant>(V)) 1577 return ValueLatticeElement::get(C); 1578 1579 ValueLatticeElement Result = ValueLatticeElement::getOverdefined(); 1580 if (auto *I = dyn_cast<Instruction>(V)) 1581 Result = getFromRangeMetadata(I); 1582 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI); 1583 1584 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n"); 1585 return Result; 1586 } 1587 1588 ValueLatticeElement LazyValueInfoImpl:: 1589 getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB, 1590 Instruction *CxtI) { 1591 LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '" 1592 << FromBB->getName() << "' to '" << ToBB->getName() 1593 << "'\n"); 1594 1595 ValueLatticeElement Result; 1596 if (!getEdgeValue(V, FromBB, ToBB, Result, CxtI)) { 1597 solve(); 1598 bool WasFastQuery = getEdgeValue(V, FromBB, ToBB, Result, CxtI); 1599 (void)WasFastQuery; 1600 assert(WasFastQuery && "More work to do after problem solved?"); 1601 } 1602 1603 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n"); 1604 return Result; 1605 } 1606 1607 void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, 1608 BasicBlock *NewSucc) { 1609 TheCache.threadEdgeImpl(OldSucc, NewSucc); 1610 } 1611 1612 //===----------------------------------------------------------------------===// 1613 // LazyValueInfo Impl 1614 //===----------------------------------------------------------------------===// 1615 1616 /// This lazily constructs the LazyValueInfoImpl. 1617 static LazyValueInfoImpl &getImpl(void *&PImpl, AssumptionCache *AC, 1618 const DataLayout *DL, 1619 DominatorTree *DT = nullptr) { 1620 if (!PImpl) { 1621 assert(DL && "getCache() called with a null DataLayout"); 1622 PImpl = new LazyValueInfoImpl(AC, *DL, DT); 1623 } 1624 return *static_cast<LazyValueInfoImpl*>(PImpl); 1625 } 1626 1627 bool LazyValueInfoWrapperPass::runOnFunction(Function &F) { 1628 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1629 const DataLayout &DL = F.getParent()->getDataLayout(); 1630 1631 DominatorTreeWrapperPass *DTWP = 1632 getAnalysisIfAvailable<DominatorTreeWrapperPass>(); 1633 Info.DT = DTWP ? &DTWP->getDomTree() : nullptr; 1634 Info.TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 1635 1636 if (Info.PImpl) 1637 getImpl(Info.PImpl, Info.AC, &DL, Info.DT).clear(); 1638 1639 // Fully lazy. 1640 return false; 1641 } 1642 1643 void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 1644 AU.setPreservesAll(); 1645 AU.addRequired<AssumptionCacheTracker>(); 1646 AU.addRequired<TargetLibraryInfoWrapperPass>(); 1647 } 1648 1649 LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; } 1650 1651 LazyValueInfo::~LazyValueInfo() { releaseMemory(); } 1652 1653 void LazyValueInfo::releaseMemory() { 1654 // If the cache was allocated, free it. 1655 if (PImpl) { 1656 delete &getImpl(PImpl, AC, nullptr); 1657 PImpl = nullptr; 1658 } 1659 } 1660 1661 bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA, 1662 FunctionAnalysisManager::Invalidator &Inv) { 1663 // We need to invalidate if we have either failed to preserve this analyses 1664 // result directly or if any of its dependencies have been invalidated. 1665 auto PAC = PA.getChecker<LazyValueAnalysis>(); 1666 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 1667 (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA))) 1668 return true; 1669 1670 return false; 1671 } 1672 1673 void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); } 1674 1675 LazyValueInfo LazyValueAnalysis::run(Function &F, 1676 FunctionAnalysisManager &FAM) { 1677 auto &AC = FAM.getResult<AssumptionAnalysis>(F); 1678 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F); 1679 auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F); 1680 1681 return LazyValueInfo(&AC, &F.getParent()->getDataLayout(), &TLI, DT); 1682 } 1683 1684 /// Returns true if we can statically tell that this value will never be a 1685 /// "useful" constant. In practice, this means we've got something like an 1686 /// alloca or a malloc call for which a comparison against a constant can 1687 /// only be guarding dead code. Note that we are potentially giving up some 1688 /// precision in dead code (a constant result) in favour of avoiding a 1689 /// expensive search for a easily answered common query. 1690 static bool isKnownNonConstant(Value *V) { 1691 V = V->stripPointerCasts(); 1692 // The return val of alloc cannot be a Constant. 1693 if (isa<AllocaInst>(V)) 1694 return true; 1695 return false; 1696 } 1697 1698 Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB, 1699 Instruction *CxtI) { 1700 // Bail out early if V is known not to be a Constant. 1701 if (isKnownNonConstant(V)) 1702 return nullptr; 1703 1704 const DataLayout &DL = BB->getModule()->getDataLayout(); 1705 ValueLatticeElement Result = 1706 getImpl(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI); 1707 1708 if (Result.isConstant()) 1709 return Result.getConstant(); 1710 if (Result.isConstantRange()) { 1711 const ConstantRange &CR = Result.getConstantRange(); 1712 if (const APInt *SingleVal = CR.getSingleElement()) 1713 return ConstantInt::get(V->getContext(), *SingleVal); 1714 } 1715 return nullptr; 1716 } 1717 1718 ConstantRange LazyValueInfo::getConstantRange(Value *V, BasicBlock *BB, 1719 Instruction *CxtI) { 1720 assert(V->getType()->isIntegerTy()); 1721 unsigned Width = V->getType()->getIntegerBitWidth(); 1722 const DataLayout &DL = BB->getModule()->getDataLayout(); 1723 ValueLatticeElement Result = 1724 getImpl(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI); 1725 if (Result.isUnknown()) 1726 return ConstantRange::getEmpty(Width); 1727 if (Result.isConstantRange()) 1728 return Result.getConstantRange(); 1729 // We represent ConstantInt constants as constant ranges but other kinds 1730 // of integer constants, i.e. ConstantExpr will be tagged as constants 1731 assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) && 1732 "ConstantInt value must be represented as constantrange"); 1733 return ConstantRange::getFull(Width); 1734 } 1735 1736 /// Determine whether the specified value is known to be a 1737 /// constant on the specified edge. Return null if not. 1738 Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB, 1739 BasicBlock *ToBB, 1740 Instruction *CxtI) { 1741 const DataLayout &DL = FromBB->getModule()->getDataLayout(); 1742 ValueLatticeElement Result = 1743 getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI); 1744 1745 if (Result.isConstant()) 1746 return Result.getConstant(); 1747 if (Result.isConstantRange()) { 1748 const ConstantRange &CR = Result.getConstantRange(); 1749 if (const APInt *SingleVal = CR.getSingleElement()) 1750 return ConstantInt::get(V->getContext(), *SingleVal); 1751 } 1752 return nullptr; 1753 } 1754 1755 ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V, 1756 BasicBlock *FromBB, 1757 BasicBlock *ToBB, 1758 Instruction *CxtI) { 1759 unsigned Width = V->getType()->getIntegerBitWidth(); 1760 const DataLayout &DL = FromBB->getModule()->getDataLayout(); 1761 ValueLatticeElement Result = 1762 getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI); 1763 1764 if (Result.isUnknown()) 1765 return ConstantRange::getEmpty(Width); 1766 if (Result.isConstantRange()) 1767 return Result.getConstantRange(); 1768 // We represent ConstantInt constants as constant ranges but other kinds 1769 // of integer constants, i.e. ConstantExpr will be tagged as constants 1770 assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) && 1771 "ConstantInt value must be represented as constantrange"); 1772 return ConstantRange::getFull(Width); 1773 } 1774 1775 static LazyValueInfo::Tristate 1776 getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val, 1777 const DataLayout &DL, TargetLibraryInfo *TLI) { 1778 // If we know the value is a constant, evaluate the conditional. 1779 Constant *Res = nullptr; 1780 if (Val.isConstant()) { 1781 Res = ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL, TLI); 1782 if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res)) 1783 return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True; 1784 return LazyValueInfo::Unknown; 1785 } 1786 1787 if (Val.isConstantRange()) { 1788 ConstantInt *CI = dyn_cast<ConstantInt>(C); 1789 if (!CI) return LazyValueInfo::Unknown; 1790 1791 const ConstantRange &CR = Val.getConstantRange(); 1792 if (Pred == ICmpInst::ICMP_EQ) { 1793 if (!CR.contains(CI->getValue())) 1794 return LazyValueInfo::False; 1795 1796 if (CR.isSingleElement()) 1797 return LazyValueInfo::True; 1798 } else if (Pred == ICmpInst::ICMP_NE) { 1799 if (!CR.contains(CI->getValue())) 1800 return LazyValueInfo::True; 1801 1802 if (CR.isSingleElement()) 1803 return LazyValueInfo::False; 1804 } else { 1805 // Handle more complex predicates. 1806 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion( 1807 (ICmpInst::Predicate)Pred, CI->getValue()); 1808 if (TrueValues.contains(CR)) 1809 return LazyValueInfo::True; 1810 if (TrueValues.inverse().contains(CR)) 1811 return LazyValueInfo::False; 1812 } 1813 return LazyValueInfo::Unknown; 1814 } 1815 1816 if (Val.isNotConstant()) { 1817 // If this is an equality comparison, we can try to fold it knowing that 1818 // "V != C1". 1819 if (Pred == ICmpInst::ICMP_EQ) { 1820 // !C1 == C -> false iff C1 == C. 1821 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE, 1822 Val.getNotConstant(), C, DL, 1823 TLI); 1824 if (Res->isNullValue()) 1825 return LazyValueInfo::False; 1826 } else if (Pred == ICmpInst::ICMP_NE) { 1827 // !C1 != C -> true iff C1 == C. 1828 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE, 1829 Val.getNotConstant(), C, DL, 1830 TLI); 1831 if (Res->isNullValue()) 1832 return LazyValueInfo::True; 1833 } 1834 return LazyValueInfo::Unknown; 1835 } 1836 1837 return LazyValueInfo::Unknown; 1838 } 1839 1840 /// Determine whether the specified value comparison with a constant is known to 1841 /// be true or false on the specified CFG edge. Pred is a CmpInst predicate. 1842 LazyValueInfo::Tristate 1843 LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C, 1844 BasicBlock *FromBB, BasicBlock *ToBB, 1845 Instruction *CxtI) { 1846 const DataLayout &DL = FromBB->getModule()->getDataLayout(); 1847 ValueLatticeElement Result = 1848 getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI); 1849 1850 return getPredicateResult(Pred, C, Result, DL, TLI); 1851 } 1852 1853 LazyValueInfo::Tristate 1854 LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C, 1855 Instruction *CxtI) { 1856 // Is or is not NonNull are common predicates being queried. If 1857 // isKnownNonZero can tell us the result of the predicate, we can 1858 // return it quickly. But this is only a fastpath, and falling 1859 // through would still be correct. 1860 const DataLayout &DL = CxtI->getModule()->getDataLayout(); 1861 if (V->getType()->isPointerTy() && C->isNullValue() && 1862 isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) { 1863 if (Pred == ICmpInst::ICMP_EQ) 1864 return LazyValueInfo::False; 1865 else if (Pred == ICmpInst::ICMP_NE) 1866 return LazyValueInfo::True; 1867 } 1868 ValueLatticeElement Result = getImpl(PImpl, AC, &DL, DT).getValueAt(V, CxtI); 1869 Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI); 1870 if (Ret != Unknown) 1871 return Ret; 1872 1873 // Note: The following bit of code is somewhat distinct from the rest of LVI; 1874 // LVI as a whole tries to compute a lattice value which is conservatively 1875 // correct at a given location. In this case, we have a predicate which we 1876 // weren't able to prove about the merged result, and we're pushing that 1877 // predicate back along each incoming edge to see if we can prove it 1878 // separately for each input. As a motivating example, consider: 1879 // bb1: 1880 // %v1 = ... ; constantrange<1, 5> 1881 // br label %merge 1882 // bb2: 1883 // %v2 = ... ; constantrange<10, 20> 1884 // br label %merge 1885 // merge: 1886 // %phi = phi [%v1, %v2] ; constantrange<1,20> 1887 // %pred = icmp eq i32 %phi, 8 1888 // We can't tell from the lattice value for '%phi' that '%pred' is false 1889 // along each path, but by checking the predicate over each input separately, 1890 // we can. 1891 // We limit the search to one step backwards from the current BB and value. 1892 // We could consider extending this to search further backwards through the 1893 // CFG and/or value graph, but there are non-obvious compile time vs quality 1894 // tradeoffs. 1895 if (CxtI) { 1896 BasicBlock *BB = CxtI->getParent(); 1897 1898 // Function entry or an unreachable block. Bail to avoid confusing 1899 // analysis below. 1900 pred_iterator PI = pred_begin(BB), PE = pred_end(BB); 1901 if (PI == PE) 1902 return Unknown; 1903 1904 // If V is a PHI node in the same block as the context, we need to ask 1905 // questions about the predicate as applied to the incoming value along 1906 // each edge. This is useful for eliminating cases where the predicate is 1907 // known along all incoming edges. 1908 if (auto *PHI = dyn_cast<PHINode>(V)) 1909 if (PHI->getParent() == BB) { 1910 Tristate Baseline = Unknown; 1911 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) { 1912 Value *Incoming = PHI->getIncomingValue(i); 1913 BasicBlock *PredBB = PHI->getIncomingBlock(i); 1914 // Note that PredBB may be BB itself. 1915 Tristate Result = getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, 1916 CxtI); 1917 1918 // Keep going as long as we've seen a consistent known result for 1919 // all inputs. 1920 Baseline = (i == 0) ? Result /* First iteration */ 1921 : (Baseline == Result ? Baseline : Unknown); /* All others */ 1922 if (Baseline == Unknown) 1923 break; 1924 } 1925 if (Baseline != Unknown) 1926 return Baseline; 1927 } 1928 1929 // For a comparison where the V is outside this block, it's possible 1930 // that we've branched on it before. Look to see if the value is known 1931 // on all incoming edges. 1932 if (!isa<Instruction>(V) || 1933 cast<Instruction>(V)->getParent() != BB) { 1934 // For predecessor edge, determine if the comparison is true or false 1935 // on that edge. If they're all true or all false, we can conclude 1936 // the value of the comparison in this block. 1937 Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI); 1938 if (Baseline != Unknown) { 1939 // Check that all remaining incoming values match the first one. 1940 while (++PI != PE) { 1941 Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI); 1942 if (Ret != Baseline) break; 1943 } 1944 // If we terminated early, then one of the values didn't match. 1945 if (PI == PE) { 1946 return Baseline; 1947 } 1948 } 1949 } 1950 } 1951 return Unknown; 1952 } 1953 1954 void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, 1955 BasicBlock *NewSucc) { 1956 if (PImpl) { 1957 const DataLayout &DL = PredBB->getModule()->getDataLayout(); 1958 getImpl(PImpl, AC, &DL, DT).threadEdge(PredBB, OldSucc, NewSucc); 1959 } 1960 } 1961 1962 void LazyValueInfo::eraseBlock(BasicBlock *BB) { 1963 if (PImpl) { 1964 const DataLayout &DL = BB->getModule()->getDataLayout(); 1965 getImpl(PImpl, AC, &DL, DT).eraseBlock(BB); 1966 } 1967 } 1968 1969 1970 void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) { 1971 if (PImpl) { 1972 getImpl(PImpl, AC, DL, DT).printLVI(F, DTree, OS); 1973 } 1974 } 1975 1976 void LazyValueInfo::disableDT() { 1977 if (PImpl) 1978 getImpl(PImpl, AC, DL, DT).disableDT(); 1979 } 1980 1981 void LazyValueInfo::enableDT() { 1982 if (PImpl) 1983 getImpl(PImpl, AC, DL, DT).enableDT(); 1984 } 1985 1986 // Print the LVI for the function arguments at the start of each basic block. 1987 void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot( 1988 const BasicBlock *BB, formatted_raw_ostream &OS) { 1989 // Find if there are latticevalues defined for arguments of the function. 1990 auto *F = BB->getParent(); 1991 for (auto &Arg : F->args()) { 1992 ValueLatticeElement Result = LVIImpl->getValueInBlock( 1993 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB)); 1994 if (Result.isUnknown()) 1995 continue; 1996 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n"; 1997 } 1998 } 1999 2000 // This function prints the LVI analysis for the instruction I at the beginning 2001 // of various basic blocks. It relies on calculated values that are stored in 2002 // the LazyValueInfoCache, and in the absence of cached values, recalculate the 2003 // LazyValueInfo for `I`, and print that info. 2004 void LazyValueInfoAnnotatedWriter::emitInstructionAnnot( 2005 const Instruction *I, formatted_raw_ostream &OS) { 2006 2007 auto *ParentBB = I->getParent(); 2008 SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI; 2009 // We can generate (solve) LVI values only for blocks that are dominated by 2010 // the I's parent. However, to avoid generating LVI for all dominating blocks, 2011 // that contain redundant/uninteresting information, we print LVI for 2012 // blocks that may use this LVI information (such as immediate successor 2013 // blocks, and blocks that contain uses of `I`). 2014 auto printResult = [&](const BasicBlock *BB) { 2015 if (!BlocksContainingLVI.insert(BB).second) 2016 return; 2017 ValueLatticeElement Result = LVIImpl->getValueInBlock( 2018 const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB)); 2019 OS << "; LatticeVal for: '" << *I << "' in BB: '"; 2020 BB->printAsOperand(OS, false); 2021 OS << "' is: " << Result << "\n"; 2022 }; 2023 2024 printResult(ParentBB); 2025 // Print the LVI analysis results for the immediate successor blocks, that 2026 // are dominated by `ParentBB`. 2027 for (auto *BBSucc : successors(ParentBB)) 2028 if (DT.dominates(ParentBB, BBSucc)) 2029 printResult(BBSucc); 2030 2031 // Print LVI in blocks where `I` is used. 2032 for (auto *U : I->users()) 2033 if (auto *UseI = dyn_cast<Instruction>(U)) 2034 if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent())) 2035 printResult(UseI->getParent()); 2036 2037 } 2038 2039 namespace { 2040 // Printer class for LazyValueInfo results. 2041 class LazyValueInfoPrinter : public FunctionPass { 2042 public: 2043 static char ID; // Pass identification, replacement for typeid 2044 LazyValueInfoPrinter() : FunctionPass(ID) { 2045 initializeLazyValueInfoPrinterPass(*PassRegistry::getPassRegistry()); 2046 } 2047 2048 void getAnalysisUsage(AnalysisUsage &AU) const override { 2049 AU.setPreservesAll(); 2050 AU.addRequired<LazyValueInfoWrapperPass>(); 2051 AU.addRequired<DominatorTreeWrapperPass>(); 2052 } 2053 2054 // Get the mandatory dominator tree analysis and pass this in to the 2055 // LVIPrinter. We cannot rely on the LVI's DT, since it's optional. 2056 bool runOnFunction(Function &F) override { 2057 dbgs() << "LVI for function '" << F.getName() << "':\n"; 2058 auto &LVI = getAnalysis<LazyValueInfoWrapperPass>().getLVI(); 2059 auto &DTree = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2060 LVI.printLVI(F, DTree, dbgs()); 2061 return false; 2062 } 2063 }; 2064 } 2065 2066 char LazyValueInfoPrinter::ID = 0; 2067 INITIALIZE_PASS_BEGIN(LazyValueInfoPrinter, "print-lazy-value-info", 2068 "Lazy Value Info Printer Pass", false, false) 2069 INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass) 2070 INITIALIZE_PASS_END(LazyValueInfoPrinter, "print-lazy-value-info", 2071 "Lazy Value Info Printer Pass", false, false) 2072