//===- SCCPSolver.cpp - SCCP Utility --------------------------- *- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements the Sparse Conditional Constant Propagation (SCCP)
// utility.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/SCCPSolver.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueLattice.h"
#include "llvm/Analysis/ValueLatticeUtils.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "sccp"

// The maximum number of range extensions allowed for operations requiring
// widening.
static const unsigned MaxNumRangeExtensions = 10;

/// Returns MergeOptions with MaxWidenSteps set to MaxNumRangeExtensions.
static ValueLatticeElement::MergeOptions getMaxWidenStepsOpts() {
  return ValueLatticeElement::MergeOptions().setMaxWidenSteps(
      MaxNumRangeExtensions);
}

namespace llvm {

bool SCCPSolver::isConstant(const ValueLatticeElement &LV) {
  return LV.isConstant() ||
         (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
}

bool SCCPSolver::isOverdefined(const ValueLatticeElement &LV) {
  return !LV.isUnknownOrUndef() && !SCCPSolver::isConstant(LV);
}

static bool canRemoveInstruction(Instruction *I) {
  if (wouldInstructionBeTriviallyDead(I))
    return true;

  // Some instructions can be handled but are rejected above. Catch
  // those cases by falling through to here.
  // TODO: Mark globals as being constant earlier, so
  // TODO: wouldInstructionBeTriviallyDead() knows that atomic loads
  // TODO: are safe to remove.
  return isa<LoadInst>(I);
}

bool SCCPSolver::tryToReplaceWithConstant(Value *V) {
  Constant *Const = nullptr;
  if (V->getType()->isStructTy()) {
    std::vector<ValueLatticeElement> IVs = getStructLatticeValueFor(V);
    if (llvm::any_of(IVs, isOverdefined))
      return false;
    std::vector<Constant *> ConstVals;
    auto *ST = cast<StructType>(V->getType());
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      ValueLatticeElement V = IVs[i];
      ConstVals.push_back(SCCPSolver::isConstant(V)
                              ? getConstant(V)
                              : UndefValue::get(ST->getElementType(i)));
    }
    Const = ConstantStruct::get(ST, ConstVals);
  } else {
    const ValueLatticeElement &IV = getLatticeValueFor(V);
    if (isOverdefined(IV))
      return false;

    Const = SCCPSolver::isConstant(IV) ? getConstant(IV)
                                       : UndefValue::get(V->getType());
  }
  assert(Const && "Constant is nullptr here!");

  // Replacing `musttail` instructions with a constant breaks the `musttail`
  // invariant unless the call itself can be removed.
  // Calls with "clang.arc.attachedcall" implicitly use the return value and
  // those uses cannot be updated with a constant.
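  // For example (illustrative IR, not taken from a test):
  //   %r = musttail call i32 @f()
  //   ret i32 %r
  // A musttail call must have its result returned directly, so folding %r to
  // a constant is only legal if the call itself can be deleted.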
  CallBase *CB = dyn_cast<CallBase>(V);
  if (CB && ((CB->isMustTailCall() && !canRemoveInstruction(CB)) ||
             CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall))) {
    Function *F = CB->getCalledFunction();

    // Don't zap returns of the callee.
    if (F)
      addToMustPreserveReturnsInFunctions(F);

    LLVM_DEBUG(dbgs() << "  Can\'t treat the result of call " << *CB
                      << " as a constant\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "  Constant: " << *Const << " = " << *V << '\n');

  // Replaces all of the uses of a variable with uses of the constant.
  V->replaceAllUsesWith(Const);
  return true;
}

/// Try to replace signed instructions with their unsigned equivalent.
static bool replaceSignedInst(SCCPSolver &Solver,
                              SmallPtrSetImpl<Value *> &InsertedValues,
                              Instruction &Inst) {
  // Determine if a signed value is known to be >= 0.
  auto isNonNegative = [&Solver](Value *V) {
    // If this value was constant-folded, it may not have a solver entry.
    // Handle integers. Otherwise, return false.
    if (auto *C = dyn_cast<Constant>(V)) {
      auto *CInt = dyn_cast<ConstantInt>(C);
      return CInt && !CInt->isNegative();
    }
    const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
    return IV.isConstantRange(/*UndefAllowed=*/false) &&
           IV.getConstantRange().isAllNonNegative();
  };

  Instruction *NewInst = nullptr;
  switch (Inst.getOpcode()) {
  // Note: We do not fold sitofp -> uitofp here because that could be more
  // expensive in codegen and may not be reversible in the backend.
  case Instruction::SExt: {
    // If the source value is not negative, this is a zext.
    Value *Op0 = Inst.getOperand(0);
    if (InsertedValues.count(Op0) || !isNonNegative(Op0))
      return false;
    NewInst = new ZExtInst(Op0, Inst.getType(), "", &Inst);
    break;
  }
  case Instruction::AShr: {
    // If the shifted value is not negative, this is a logical shift right.
    Value *Op0 = Inst.getOperand(0);
    if (InsertedValues.count(Op0) || !isNonNegative(Op0))
      return false;
    NewInst = BinaryOperator::CreateLShr(Op0, Inst.getOperand(1), "", &Inst);
    break;
  }
  case Instruction::SDiv:
  case Instruction::SRem: {
    // If both operands are not negative, this is the same as udiv/urem.
    Value *Op0 = Inst.getOperand(0), *Op1 = Inst.getOperand(1);
    if (InsertedValues.count(Op0) || InsertedValues.count(Op1) ||
        !isNonNegative(Op0) || !isNonNegative(Op1))
      return false;
    auto NewOpcode = Inst.getOpcode() == Instruction::SDiv ? Instruction::UDiv
                                                           : Instruction::URem;
    NewInst = BinaryOperator::Create(NewOpcode, Op0, Op1, "", &Inst);
    break;
  }
  default:
    return false;
  }

  // Wire up the new instruction and update state.
  assert(NewInst && "Expected replacement instruction");
  NewInst->takeName(&Inst);
  InsertedValues.insert(NewInst);
  Inst.replaceAllUsesWith(NewInst);
  Solver.removeLatticeValueFor(&Inst);
  Inst.eraseFromParent();
  return true;
}

bool SCCPSolver::simplifyInstsInBlock(BasicBlock &BB,
                                      SmallPtrSetImpl<Value *> &InsertedValues,
                                      Statistic &InstRemovedStat,
                                      Statistic &InstReplacedStat) {
  bool MadeChanges = false;
  for (Instruction &Inst : make_early_inc_range(BB)) {
    if (Inst.getType()->isVoidTy())
      continue;
    if (tryToReplaceWithConstant(&Inst)) {
      if (canRemoveInstruction(&Inst))
        Inst.eraseFromParent();

      MadeChanges = true;
      ++InstRemovedStat;
    } else if (replaceSignedInst(*this, InsertedValues, Inst)) {
      MadeChanges = true;
      ++InstReplacedStat;
    }
  }
  return MadeChanges;
}

bool SCCPSolver::removeNonFeasibleEdges(BasicBlock *BB, DomTreeUpdater &DTU,
                                        BasicBlock *&NewUnreachableBB) const {
  SmallPtrSet<BasicBlock *, 8> FeasibleSuccessors;
  bool HasNonFeasibleEdges = false;
  for (BasicBlock *Succ : successors(BB)) {
    if (isEdgeFeasible(BB, Succ))
      FeasibleSuccessors.insert(Succ);
    else
      HasNonFeasibleEdges = true;
  }

  // All edges feasible, nothing to do.
  if (!HasNonFeasibleEdges)
    return false;

  // SCCP can only determine non-feasible edges for br, switch and indirectbr.
  Instruction *TI = BB->getTerminator();
  assert((isa<BranchInst>(TI) || isa<SwitchInst>(TI) ||
          isa<IndirectBrInst>(TI)) &&
         "Terminator must be a br, switch or indirectbr");

  if (FeasibleSuccessors.size() == 0) {
    // Branch on undef/poison, replace with unreachable.
    SmallPtrSet<BasicBlock *, 8> SeenSuccs;
    SmallVector<DominatorTree::UpdateType, 8> Updates;
    for (BasicBlock *Succ : successors(BB)) {
      Succ->removePredecessor(BB);
      if (SeenSuccs.insert(Succ).second)
        Updates.push_back({DominatorTree::Delete, BB, Succ});
    }
    TI->eraseFromParent();
    new UnreachableInst(BB->getContext(), BB);
    DTU.applyUpdatesPermissive(Updates);
  } else if (FeasibleSuccessors.size() == 1) {
    // Replace with an unconditional branch to the only feasible successor.
    BasicBlock *OnlyFeasibleSuccessor = *FeasibleSuccessors.begin();
    SmallVector<DominatorTree::UpdateType, 8> Updates;
    bool HaveSeenOnlyFeasibleSuccessor = false;
    for (BasicBlock *Succ : successors(BB)) {
      if (Succ == OnlyFeasibleSuccessor && !HaveSeenOnlyFeasibleSuccessor) {
        // Don't remove the edge to the only feasible successor the first time
        // we see it. We still do need to remove any multi-edges to it though.
        HaveSeenOnlyFeasibleSuccessor = true;
        continue;
      }

      Succ->removePredecessor(BB);
      Updates.push_back({DominatorTree::Delete, BB, Succ});
    }

    BranchInst::Create(OnlyFeasibleSuccessor, BB);
    TI->eraseFromParent();
    DTU.applyUpdatesPermissive(Updates);
  } else if (FeasibleSuccessors.size() > 1) {
    SwitchInstProfUpdateWrapper SI(*cast<SwitchInst>(TI));
    SmallVector<DominatorTree::UpdateType, 8> Updates;

    // If the default destination is infeasible, it will never be taken.
    // Replace it with a new block containing a single unreachable instruction.
    BasicBlock *DefaultDest = SI->getDefaultDest();
    if (!FeasibleSuccessors.contains(DefaultDest)) {
      if (!NewUnreachableBB) {
        NewUnreachableBB =
            BasicBlock::Create(DefaultDest->getContext(), "default.unreachable",
                               DefaultDest->getParent(), DefaultDest);
        new UnreachableInst(DefaultDest->getContext(), NewUnreachableBB);
      }

      SI->setDefaultDest(NewUnreachableBB);
      Updates.push_back({DominatorTree::Delete, BB, DefaultDest});
      Updates.push_back({DominatorTree::Insert, BB, NewUnreachableBB});
    }

    for (auto CI = SI->case_begin(); CI != SI->case_end();) {
      if (FeasibleSuccessors.contains(CI->getCaseSuccessor())) {
        ++CI;
        continue;
      }

      BasicBlock *Succ = CI->getCaseSuccessor();
      Succ->removePredecessor(BB);
      Updates.push_back({DominatorTree::Delete, BB, Succ});
      SI.removeCase(CI);
      // Don't increment CI, as we removed a case.
    }

    DTU.applyUpdatesPermissive(Updates);
  } else {
    llvm_unreachable("Must have at least one feasible successor");
  }
  return true;
}

/// Helper class for SCCPSolver. This implements the instruction visitor and
/// holds all the state.
class SCCPInstVisitor : public InstVisitor<SCCPInstVisitor> {
  const DataLayout &DL;
  std::function<const TargetLibraryInfo &(Function &)> GetTLI;
  SmallPtrSet<BasicBlock *, 8> BBExecutable; // The BBs that are executable.
  DenseMap<Value *, ValueLatticeElement>
      ValueState; // The state each value is in.

  /// StructValueState - This maintains ValueState for values that have
  /// StructType, for example for formal arguments, calls, insertelement, etc.
  DenseMap<std::pair<Value *, unsigned>, ValueLatticeElement> StructValueState;

  /// TrackedGlobals - If we are tracking any values for the contents of a
  /// global variable, we keep a mapping from the constant accessor to the
  /// element of the global, to the currently known value. If the value
  /// becomes overdefined, its entry is simply removed from this map.
  DenseMap<GlobalVariable *, ValueLatticeElement> TrackedGlobals;

  /// TrackedRetVals - If we are tracking arguments into and the return
  /// value out of a function, it will have an entry in this map, indicating
  /// what the known return value for the function is.
  MapVector<Function *, ValueLatticeElement> TrackedRetVals;

  /// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions
  /// that return multiple values.
  MapVector<std::pair<Function *, unsigned>, ValueLatticeElement>
      TrackedMultipleRetVals;

  /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is
  /// represented here for efficient lookup.
  SmallPtrSet<Function *, 16> MRVFunctionsTracked;

  /// A list of functions whose return cannot be modified.
  SmallPtrSet<Function *, 16> MustPreserveReturnsInFunctions;

  /// TrackingIncomingArguments - This is the set of functions whose arguments
  /// we make optimistic assumptions about and try to prove as constants.
  SmallPtrSet<Function *, 16> TrackingIncomingArguments;

  /// The reason for two worklists is that overdefined is the lowest state
  /// on the lattice, and moving things to overdefined as fast as possible
  /// makes SCCP converge much faster.
  ///
  /// By having a separate worklist, we accomplish this because everything
  /// possibly overdefined will become overdefined at the soonest possible
  /// point.
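  ///
  /// For example (illustrative): if %x is marked overdefined while %y is
  /// refined to a constant in the same round, the users of %x are revisited
  /// first via OverdefinedInstWorkList, so values depending on %x reach their
  /// final overdefined state without bouncing through intermediate constants.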
  SmallVector<Value *, 64> OverdefinedInstWorkList;
  SmallVector<Value *, 64> InstWorkList;

  // The BasicBlock work list.
  SmallVector<BasicBlock *, 64> BBWorkList;

  /// KnownFeasibleEdges - Entries in this set are edges which have already had
  /// PHI nodes retriggered.
  using Edge = std::pair<BasicBlock *, BasicBlock *>;
  DenseSet<Edge> KnownFeasibleEdges;

  DenseMap<Function *, AnalysisResultsForFn> AnalysisResults;
  DenseMap<Value *, SmallPtrSet<User *, 2>> AdditionalUsers;

  LLVMContext &Ctx;

private:
  ConstantInt *getConstantInt(const ValueLatticeElement &IV) const {
    return dyn_cast_or_null<ConstantInt>(getConstant(IV));
  }

  // pushToWorkList - Helper for markConstant/markOverdefined.
  void pushToWorkList(ValueLatticeElement &IV, Value *V);

  // Helper to push \p V to the worklist, after updating it to \p IV. Also
  // prints a debug message with the updated value.
  void pushToWorkListMsg(ValueLatticeElement &IV, Value *V);

  // markConstant - Make a value be marked as "constant". If the value
  // is not already a constant, add it to the instruction work list so that
  // the users of the instruction are updated later.
  bool markConstant(ValueLatticeElement &IV, Value *V, Constant *C,
                    bool MayIncludeUndef = false);

  bool markConstant(Value *V, Constant *C) {
    assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
    return markConstant(ValueState[V], V, C);
  }

  // markOverdefined - Make a value be marked as "overdefined". If the
  // value is not already overdefined, add it to the overdefined instruction
  // work list so that the users of the instruction are updated later.
  bool markOverdefined(ValueLatticeElement &IV, Value *V);

  /// Merge \p MergeWithV into \p IV and push \p V to the worklist, if \p IV
  /// changes.
  bool mergeInValue(ValueLatticeElement &IV, Value *V,
                    ValueLatticeElement MergeWithV,
                    ValueLatticeElement::MergeOptions Opts = {
                        /*MayIncludeUndef=*/false, /*CheckWiden=*/false});

  bool mergeInValue(Value *V, ValueLatticeElement MergeWithV,
                    ValueLatticeElement::MergeOptions Opts = {
                        /*MayIncludeUndef=*/false, /*CheckWiden=*/false}) {
    assert(!V->getType()->isStructTy() &&
           "non-structs should use markConstant");
    return mergeInValue(ValueState[V], V, MergeWithV, Opts);
  }

  /// getValueState - Return the ValueLatticeElement object that corresponds to
  /// the value. This function handles the case when the value hasn't been seen
  /// yet by properly seeding constants etc.
  ValueLatticeElement &getValueState(Value *V) {
    assert(!V->getType()->isStructTy() && "Should use getStructValueState");

    auto I = ValueState.insert(std::make_pair(V, ValueLatticeElement()));
    ValueLatticeElement &LV = I.first->second;

    if (!I.second)
      return LV; // Common case, already in the map.

    if (auto *C = dyn_cast<Constant>(V))
      LV.markConstant(C); // Constants are constant.

    // All others are unknown by default.
    return LV;
  }

  /// getStructValueState - Return the ValueLatticeElement object that
  /// corresponds to the value/field pair. This function handles the case when
  /// the value hasn't been seen yet by properly seeding constants etc.
  ValueLatticeElement &getStructValueState(Value *V, unsigned i) {
    assert(V->getType()->isStructTy() && "Should use getValueState");
    assert(i < cast<StructType>(V->getType())->getNumElements() &&
           "Invalid element #");

    auto I = StructValueState.insert(
        std::make_pair(std::make_pair(V, i), ValueLatticeElement()));
    ValueLatticeElement &LV = I.first->second;

    if (!I.second)
      return LV; // Common case, already in the map.

    if (auto *C = dyn_cast<Constant>(V)) {
      Constant *Elt = C->getAggregateElement(i);

      if (!Elt)
        LV.markOverdefined(); // Unknown sort of constant.
      else
        LV.markConstant(Elt); // Constants are constant.
    }

    // All others are unknown by default.
    return LV;
  }

  /// markEdgeExecutable - Mark the edge from Source to Dest as executable,
  /// adding Dest to the BB work list if it is not already executable.
  bool markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest);

  // getFeasibleSuccessors - Return a vector of booleans to indicate which
  // successors are reachable from a given terminator instruction.
  void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs);

  // operandChangedState - This method is invoked on all of the users of an
  // instruction that just changed state somehow. Based on this information,
  // we need to update the specified user of this instruction.
  void operandChangedState(Instruction *I) {
    if (BBExecutable.count(I->getParent())) // Inst is executable?
      visit(*I);
  }

  // Add U as additional user of V.
  void addAdditionalUser(Value *V, User *U) {
    auto Iter = AdditionalUsers.insert({V, {}});
    Iter.first->second.insert(U);
  }

  // Mark I's users as changed, including AdditionalUsers.
  void markUsersAsChanged(Value *I) {
    // Functions include their arguments in the use-list. Changed function
    // values mean that the result of the function changed. We only need to
    // update the call sites with the new function result and do not have to
    // propagate the call arguments.
    if (isa<Function>(I)) {
      for (User *U : I->users()) {
        if (auto *CB = dyn_cast<CallBase>(U))
          handleCallResult(*CB);
      }
    } else {
      for (User *U : I->users())
        if (auto *UI = dyn_cast<Instruction>(U))
          operandChangedState(UI);
    }

    auto Iter = AdditionalUsers.find(I);
    if (Iter != AdditionalUsers.end()) {
      // Copy additional users before notifying them of changes, because new
      // users may be added, potentially invalidating the iterator.
      SmallVector<Instruction *, 2> ToNotify;
      for (User *U : Iter->second)
        if (auto *UI = dyn_cast<Instruction>(U))
          ToNotify.push_back(UI);
      for (Instruction *UI : ToNotify)
        operandChangedState(UI);
    }
  }
  void handleCallOverdefined(CallBase &CB);
  void handleCallResult(CallBase &CB);
  void handleCallArguments(CallBase &CB);
  void handleExtractOfWithOverflow(ExtractValueInst &EVI,
                                   const WithOverflowInst *WO, unsigned Idx);

private:
  friend class InstVisitor<SCCPInstVisitor>;

  // visit implementations - Something changed in this instruction. Either an
  // operand made a transition, or the instruction is newly executable. Change
  // the value type of I to reflect these changes if appropriate.
  void visitPHINode(PHINode &I);

  // Terminators

  void visitReturnInst(ReturnInst &I);
  void visitTerminator(Instruction &TI);

  void visitCastInst(CastInst &I);
  void visitSelectInst(SelectInst &I);
  void visitUnaryOperator(Instruction &I);
  void visitBinaryOperator(Instruction &I);
  void visitCmpInst(CmpInst &I);
  void visitExtractValueInst(ExtractValueInst &EVI);
  void visitInsertValueInst(InsertValueInst &IVI);

  void visitCatchSwitchInst(CatchSwitchInst &CPI) {
    markOverdefined(&CPI);
    visitTerminator(CPI);
  }

  // Instructions that cannot be folded away.

  void visitStoreInst(StoreInst &I);
  void visitLoadInst(LoadInst &I);
  void visitGetElementPtrInst(GetElementPtrInst &I);

  void visitInvokeInst(InvokeInst &II) {
    visitCallBase(II);
    visitTerminator(II);
  }

  void visitCallBrInst(CallBrInst &CBI) {
    visitCallBase(CBI);
    visitTerminator(CBI);
  }

  void visitCallBase(CallBase &CB);
  void visitResumeInst(ResumeInst &I) { /*returns void*/
  }
  void visitUnreachableInst(UnreachableInst &I) { /*returns void*/
  }
  void visitFenceInst(FenceInst &I) { /*returns void*/
  }

  void visitInstruction(Instruction &I);

public:
  void addAnalysis(Function &F, AnalysisResultsForFn A) {
    AnalysisResults.insert({&F, std::move(A)});
  }

  void visitCallInst(CallInst &I) { visitCallBase(I); }

  bool markBlockExecutable(BasicBlock *BB);

  const PredicateBase *getPredicateInfoFor(Instruction *I) {
    auto A = AnalysisResults.find(I->getParent()->getParent());
    if (A == AnalysisResults.end())
      return nullptr;
    return A->second.PredInfo->getPredicateInfoFor(I);
  }

  const LoopInfo &getLoopInfo(Function &F) {
    auto A = AnalysisResults.find(&F);
    assert(A != AnalysisResults.end() && A->second.LI &&
           "Need LoopInfo analysis results for function.");
    return *A->second.LI;
  }

  DomTreeUpdater getDTU(Function &F) {
    auto A = AnalysisResults.find(&F);
    assert(A != AnalysisResults.end() && "Need analysis results for function.");
    return {A->second.DT, A->second.PDT, DomTreeUpdater::UpdateStrategy::Lazy};
  }

  SCCPInstVisitor(const DataLayout &DL,
                  std::function<const TargetLibraryInfo &(Function &)> GetTLI,
                  LLVMContext &Ctx)
      : DL(DL), GetTLI(GetTLI), Ctx(Ctx) {}

  void trackValueOfGlobalVariable(GlobalVariable *GV) {
    // We only track the contents of scalar globals.
    if (GV->getValueType()->isSingleValueType()) {
      ValueLatticeElement &IV = TrackedGlobals[GV];
      IV.markConstant(GV->getInitializer());
    }
  }

  void addTrackedFunction(Function *F) {
    // Add an entry, F -> undef.
    if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
      MRVFunctionsTracked.insert(F);
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        TrackedMultipleRetVals.insert(
            std::make_pair(std::make_pair(F, i), ValueLatticeElement()));
    } else if (!F->getReturnType()->isVoidTy())
      TrackedRetVals.insert(std::make_pair(F, ValueLatticeElement()));
  }

  void addToMustPreserveReturnsInFunctions(Function *F) {
    MustPreserveReturnsInFunctions.insert(F);
  }

  bool mustPreserveReturn(Function *F) {
    return MustPreserveReturnsInFunctions.count(F);
  }

  void addArgumentTrackedFunction(Function *F) {
    TrackingIncomingArguments.insert(F);
  }

  bool isArgumentTrackedFunction(Function *F) {
    return TrackingIncomingArguments.count(F);
  }

  void solve();

  bool resolvedUndefsIn(Function &F);

  bool isBlockExecutable(BasicBlock *BB) const {
    return BBExecutable.count(BB);
  }

  bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const;

  std::vector<ValueLatticeElement> getStructLatticeValueFor(Value *V) const {
    std::vector<ValueLatticeElement> StructValues;
    auto *STy = dyn_cast<StructType>(V->getType());
    assert(STy && "getStructLatticeValueFor() can be called only on structs");
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto I = StructValueState.find(std::make_pair(V, i));
      assert(I != StructValueState.end() && "Value not in valuemap!");
      StructValues.push_back(I->second);
    }
    return StructValues;
  }

  void removeLatticeValueFor(Value *V) { ValueState.erase(V); }

  const ValueLatticeElement &getLatticeValueFor(Value *V) const {
    assert(!V->getType()->isStructTy() &&
           "Should use getStructLatticeValueFor");
    DenseMap<Value *, ValueLatticeElement>::const_iterator I =
        ValueState.find(V);
    assert(I != ValueState.end() &&
           "V not found in ValueState nor Paramstate map!");
    return I->second;
  }

  const MapVector<Function *, ValueLatticeElement> &getTrackedRetVals() {
    return TrackedRetVals;
  }

  const DenseMap<GlobalVariable *, ValueLatticeElement> &getTrackedGlobals() {
    return TrackedGlobals;
  }

  const SmallPtrSet<Function *, 16> getMRVFunctionsTracked() {
    return MRVFunctionsTracked;
  }

  void markOverdefined(Value *V) {
    if (auto *STy = dyn_cast<StructType>(V->getType()))
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        markOverdefined(getStructValueState(V, i), V);
    else
      markOverdefined(ValueState[V], V);
  }

  bool isStructLatticeConstant(Function *F, StructType *STy);

  Constant *getConstant(const ValueLatticeElement &LV) const;
  ConstantRange getConstantRange(const ValueLatticeElement &LV, Type *Ty) const;

  SmallPtrSetImpl<Function *> &getArgumentTrackedFunctions() {
    return TrackingIncomingArguments;
  }

  void markArgInFuncSpecialization(Function *F,
                                   const SmallVectorImpl<ArgInfo> &Args);

  void markFunctionUnreachable(Function *F) {
    for (auto &BB : *F)
      BBExecutable.erase(&BB);
  }

  void solveWhileResolvedUndefsIn(Module &M) {
    bool ResolvedUndefs = true;
    while (ResolvedUndefs) {
      solve();
      ResolvedUndefs = false;
      for (Function &F : M)
        ResolvedUndefs |= resolvedUndefsIn(F);
    }
  }

  void solveWhileResolvedUndefsIn(SmallVectorImpl<Function *> &WorkList) {
    bool ResolvedUndefs = true;
    while (ResolvedUndefs) {
      solve();
      ResolvedUndefs = false;
      for (Function *F : WorkList)
        ResolvedUndefs |= resolvedUndefsIn(*F);
    }
  }
};

} // namespace llvm

bool SCCPInstVisitor::markBlockExecutable(BasicBlock *BB) {
  if (!BBExecutable.insert(BB).second)
    return false;
  LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
  BBWorkList.push_back(BB); // Add the block to the work list!
  return true;
}

void SCCPInstVisitor::pushToWorkList(ValueLatticeElement &IV, Value *V) {
  if (IV.isOverdefined())
    return OverdefinedInstWorkList.push_back(V);
  InstWorkList.push_back(V);
}

void SCCPInstVisitor::pushToWorkListMsg(ValueLatticeElement &IV, Value *V) {
  LLVM_DEBUG(dbgs() << "updated " << IV << ": " << *V << '\n');
  pushToWorkList(IV, V);
}

bool SCCPInstVisitor::markConstant(ValueLatticeElement &IV, Value *V,
                                   Constant *C, bool MayIncludeUndef) {
  if (!IV.markConstant(C, MayIncludeUndef))
    return false;
  LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
  pushToWorkList(IV, V);
  return true;
}

bool SCCPInstVisitor::markOverdefined(ValueLatticeElement &IV, Value *V) {
  if (!IV.markOverdefined())
    return false;

  LLVM_DEBUG(dbgs() << "markOverdefined: ";
             if (auto *F = dyn_cast<Function>(V)) dbgs()
             << "Function '" << F->getName() << "'\n";
             else dbgs() << *V << '\n');
  // Only instructions go on the work list.
  pushToWorkList(IV, V);
  return true;
}

bool SCCPInstVisitor::isStructLatticeConstant(Function *F, StructType *STy) {
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
    const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i));
    assert(It != TrackedMultipleRetVals.end());
    ValueLatticeElement LV = It->second;
    if (!SCCPSolver::isConstant(LV))
      return false;
  }
  return true;
}

Constant *SCCPInstVisitor::getConstant(const ValueLatticeElement &LV) const {
  if (LV.isConstant())
    return LV.getConstant();

  if (LV.isConstantRange()) {
    const auto &CR = LV.getConstantRange();
    if (CR.getSingleElement())
      return ConstantInt::get(Ctx, *CR.getSingleElement());
  }
  return nullptr;
}

ConstantRange
SCCPInstVisitor::getConstantRange(const ValueLatticeElement &LV,
                                  Type *Ty) const {
  assert(Ty->isIntOrIntVectorTy() && "Should be int or int vector");
  if (LV.isConstantRange())
    return LV.getConstantRange();
  return ConstantRange::getFull(Ty->getScalarSizeInBits());
}

void SCCPInstVisitor::markArgInFuncSpecialization(
    Function *F, const SmallVectorImpl<ArgInfo> &Args) {
  assert(!Args.empty() && "Specialization without arguments");
  assert(F->arg_size() == Args[0].Formal->getParent()->arg_size() &&
         "Functions should have the same number of arguments");

  auto Iter = Args.begin();
  Argument *NewArg = F->arg_begin();
  Argument *OldArg = Args[0].Formal->getParent()->arg_begin();
  for (auto End = F->arg_end(); NewArg != End; ++NewArg, ++OldArg) {

    LLVM_DEBUG(dbgs() << "SCCP: Marking argument "
                      << NewArg->getNameOrAsOperand() << "\n");

    if (Iter != Args.end() && OldArg == Iter->Formal) {
      // Mark the argument constants in the new function.
      markConstant(NewArg, Iter->Actual);
      ++Iter;
    } else if (ValueState.count(OldArg)) {
      // For the remaining arguments in the new function, copy the lattice
      // state over from the old function.
      //
      // Note: This previously looked like this:
      //   ValueState[NewArg] = ValueState[OldArg];
      // This is incorrect because the DenseMap class may resize the underlying
      // memory when inserting `NewArg`, which will invalidate the reference to
      // `OldArg`. Instead, we make sure `NewArg` exists before setting it.
      auto &NewValue = ValueState[NewArg];
      NewValue = ValueState[OldArg];
      pushToWorkList(NewValue, NewArg);
    }
  }
}

void SCCPInstVisitor::visitInstruction(Instruction &I) {
  // All the instructions we don't do any special handling for just
  // go to overdefined.
  LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n');
  markOverdefined(&I);
}

bool SCCPInstVisitor::mergeInValue(ValueLatticeElement &IV, Value *V,
                                   ValueLatticeElement MergeWithV,
                                   ValueLatticeElement::MergeOptions Opts) {
  if (IV.mergeIn(MergeWithV, Opts)) {
    pushToWorkList(IV, V);
    LLVM_DEBUG(dbgs() << "Merged " << MergeWithV << " into " << *V << " : "
                      << IV << "\n");
    return true;
  }
  return false;
}

bool SCCPInstVisitor::markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
  if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
    return false; // This edge is already known to be executable!

  if (!markBlockExecutable(Dest)) {
    // If the destination is already executable, we just made an *edge*
    // feasible that wasn't before. Revisit the PHI nodes in the block
    // because they have potentially new operands.
    LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
                      << " -> " << Dest->getName() << '\n');

    for (PHINode &PN : Dest->phis())
      visitPHINode(PN);
  }
  return true;
}

// getFeasibleSuccessors - Return a vector of booleans to indicate which
// successors are reachable from a given terminator instruction.
void SCCPInstVisitor::getFeasibleSuccessors(Instruction &TI,
                                            SmallVectorImpl<bool> &Succs) {
  Succs.resize(TI.getNumSuccessors());
  if (auto *BI = dyn_cast<BranchInst>(&TI)) {
    if (BI->isUnconditional()) {
      Succs[0] = true;
      return;
    }

    ValueLatticeElement BCValue = getValueState(BI->getCondition());
    ConstantInt *CI = getConstantInt(BCValue);
    if (!CI) {
      // Overdefined condition variables, and branches on unfoldable constant
      // conditions, mean the branch could go either way.
      if (!BCValue.isUnknownOrUndef())
        Succs[0] = Succs[1] = true;
      return;
    }

    // Constant condition variables mean the branch can only go a single way.
    Succs[CI->isZero()] = true;
    return;
  }

  // Unwinding instructions' successors are always executable.
  if (TI.isExceptionalTerminator()) {
    Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  if (auto *SI = dyn_cast<SwitchInst>(&TI)) {
    if (!SI->getNumCases()) {
      Succs[0] = true;
      return;
    }
    const ValueLatticeElement &SCValue = getValueState(SI->getCondition());
    if (ConstantInt *CI = getConstantInt(SCValue)) {
      Succs[SI->findCaseValue(CI)->getSuccessorIndex()] = true;
      return;
    }

    // TODO: Switch on undef is UB. Stop passing false once the rest of LLVM
    // is ready.
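    // Illustrative example (hypothetical values): if the condition's range is
    // known to be [0, 4), only case values inside that range mark their
    // successors feasible below; a case such as 7 stays infeasible from this
    // block, while the default successor is conservatively kept feasible.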
    if (SCValue.isConstantRange(/*UndefAllowed=*/false)) {
      const ConstantRange &Range = SCValue.getConstantRange();
      for (const auto &Case : SI->cases()) {
        const APInt &CaseValue = Case.getCaseValue()->getValue();
        if (Range.contains(CaseValue))
          Succs[Case.getSuccessorIndex()] = true;
      }

      // TODO: Determine whether default case is reachable.
      Succs[SI->case_default()->getSuccessorIndex()] = true;
      return;
    }

    // Overdefined or unknown condition? All destinations are executable!
    if (!SCValue.isUnknownOrUndef())
      Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  // In case of an indirect branch, if the address is a blockaddress, we mark
  // the target as executable.
  if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) {
    // Casts are folded by visitCastInst.
    ValueLatticeElement IBRValue = getValueState(IBR->getAddress());
    BlockAddress *Addr = dyn_cast_or_null<BlockAddress>(getConstant(IBRValue));
    if (!Addr) { // Overdefined or unknown condition?
      // All destinations are executable!
      if (!IBRValue.isUnknownOrUndef())
        Succs.assign(TI.getNumSuccessors(), true);
      return;
    }

    BasicBlock *T = Addr->getBasicBlock();
    assert(Addr->getFunction() == T->getParent() &&
           "Block address of a different function?");
    for (unsigned i = 0; i < IBR->getNumSuccessors(); ++i) {
      // This is the target.
      if (IBR->getDestination(i) == T) {
        Succs[i] = true;
        return;
      }
    }

    // If we didn't find our destination in the IBR successor list, then we
    // have undefined behavior. It's ok to assume no successor is executable.
    return;
  }

  // In case of callbr, we pessimistically assume that all successors are
  // feasible.
  if (isa<CallBrInst>(&TI)) {
    Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
  llvm_unreachable("SCCP: Don't know how to handle this terminator!");
}

// isEdgeFeasible - Return true if the control flow edge from the 'From' basic
// block to the 'To' basic block is currently feasible.
bool SCCPInstVisitor::isEdgeFeasible(BasicBlock *From, BasicBlock *To) const {
  // Check if we've called markEdgeExecutable on the edge yet. (We could
  // be more aggressive and try to consider edges which haven't been marked
  // yet, but there isn't any need.)
  return KnownFeasibleEdges.count(Edge(From, To));
}

// visit Implementations - Something changed in this instruction, either an
// operand made a transition, or the instruction is newly executable. Change
// the value type of I to reflect these changes if appropriate. This method
// makes sure to do the following actions:
//
// 1. If a phi node merges two constants in, and the values conflict between
//    different branches, or if the PHI node merges in an overdefined value,
//    then the PHI node becomes overdefined.
// 2. If a phi node merges only constants in, and they all agree on the value,
//    the PHI node becomes a constant value equal to that.
// 3. If V <- x (op) y && isConstant(x) && isConstant(y) V = Constant
// 4. If V <- x (op) y && (isOverdefined(x) || isOverdefined(y)) V = Overdefined
// 5. If V <- MEM or V <- CALL or V <- (unknown) then V = Overdefined
// 6. If a conditional branch has a value that is constant, make the selected
//    destination executable
// 7. If a conditional branch has a value that is overdefined, make all
//    successors executable.
void SCCPInstVisitor::visitPHINode(PHINode &PN) {
  // If this PN returns a struct, just mark the result overdefined.
  // TODO: We could do a lot better than this if code actually uses this.
  if (PN.getType()->isStructTy())
    return (void)markOverdefined(&PN);

  if (getValueState(&PN).isOverdefined())
    return; // Quick exit

  // Super-extra-high-degree PHI nodes are unlikely to ever be marked constant,
  // and slow us down a lot. Just mark them overdefined.
  if (PN.getNumIncomingValues() > 64)
    return (void)markOverdefined(&PN);

  unsigned NumActiveIncoming = 0;

  // Look at all of the executable operands of the PHI node. If any of them
  // are overdefined, the PHI becomes overdefined as well. If they are all
  // constant, and they agree with each other, the PHI becomes the identical
  // constant. If they are constant and don't agree, the PHI is a constant
  // range. If there are no executable operands, the PHI remains unknown.
  ValueLatticeElement PhiState = getValueState(&PN);
  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
    if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
      continue;

    ValueLatticeElement IV = getValueState(PN.getIncomingValue(i));
    PhiState.mergeIn(IV);
    NumActiveIncoming++;
    if (PhiState.isOverdefined())
      break;
  }

  // We allow up to 1 range extension per active incoming value and one
  // additional extension. Note that we manually adjust the number of range
  // extensions to match the number of active incoming values. This helps to
  // limit multiple extensions caused by the same incoming value, if other
  // incoming values are equal.
  mergeInValue(&PN, PhiState,
               ValueLatticeElement::MergeOptions().setMaxWidenSteps(
                   NumActiveIncoming + 1));
  ValueLatticeElement &PhiStateRef = getValueState(&PN);
  PhiStateRef.setNumRangeExtensions(
      std::max(NumActiveIncoming, PhiStateRef.getNumRangeExtensions()));
}

void SCCPInstVisitor::visitReturnInst(ReturnInst &I) {
  if (I.getNumOperands() == 0)
    return; // ret void

  Function *F = I.getParent()->getParent();
  Value *ResultOp = I.getOperand(0);

  // If we are tracking the return value of this function, merge it in.
  if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) {
    auto TFRVI = TrackedRetVals.find(F);
    if (TFRVI != TrackedRetVals.end()) {
      mergeInValue(TFRVI->second, F, getValueState(ResultOp));
      return;
    }
  }

  // Handle functions that return multiple values.
  if (!TrackedMultipleRetVals.empty()) {
    if (auto *STy = dyn_cast<StructType>(ResultOp->getType()))
      if (MRVFunctionsTracked.count(F))
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
          mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
                       getStructValueState(ResultOp, i));
  }
}

void SCCPInstVisitor::visitTerminator(Instruction &TI) {
  SmallVector<bool, 16> SuccFeasible;
  getFeasibleSuccessors(TI, SuccFeasible);

  BasicBlock *BB = TI.getParent();

  // Mark all feasible successors executable.
  for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
    if (SuccFeasible[i])
      markEdgeExecutable(BB, TI.getSuccessor(i));
}

void SCCPInstVisitor::visitCastInst(CastInst &I) {
  // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (ValueState[&I].isOverdefined())
    return;

  ValueLatticeElement OpSt = getValueState(I.getOperand(0));
  if (OpSt.isUnknownOrUndef())
    return;

  if (Constant *OpC = getConstant(OpSt)) {
    // Fold the constant as we build.
    Constant *C = ConstantFoldCastOperand(I.getOpcode(), OpC, I.getType(), DL);
    markConstant(&I, C);
  } else if (I.getDestTy()->isIntegerTy() &&
             I.getSrcTy()->isIntOrIntVectorTy()) {
    auto &LV = getValueState(&I);
    ConstantRange OpRange = getConstantRange(OpSt, I.getSrcTy());

    Type *DestTy = I.getDestTy();
    // Vectors where all elements have the same known constant range are
    // treated as a single constant range in the lattice. When bitcasting such
    // vectors, there is a mismatch between the width of the lattice value
    // (single constant range) and the original operands (vector). Go to
    // overdefined in that case.
    if (I.getOpcode() == Instruction::BitCast &&
        I.getOperand(0)->getType()->isVectorTy() &&
        OpRange.getBitWidth() < DL.getTypeSizeInBits(DestTy))
      return (void)markOverdefined(&I);

    ConstantRange Res =
        OpRange.castOp(I.getOpcode(), DL.getTypeSizeInBits(DestTy));
    mergeInValue(LV, &I, ValueLatticeElement::getRange(Res));
  } else
    markOverdefined(&I);
}

void SCCPInstVisitor::handleExtractOfWithOverflow(ExtractValueInst &EVI,
                                                  const WithOverflowInst *WO,
                                                  unsigned Idx) {
  Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
  ValueLatticeElement L = getValueState(LHS);
  ValueLatticeElement R = getValueState(RHS);
  addAdditionalUser(LHS, &EVI);
  addAdditionalUser(RHS, &EVI);
  if (L.isUnknownOrUndef() || R.isUnknownOrUndef())
    return; // Wait to resolve.

  Type *Ty = LHS->getType();
  ConstantRange LR = getConstantRange(L, Ty);
  ConstantRange RR = getConstantRange(R, Ty);
  if (Idx == 0) {
    ConstantRange Res = LR.binaryOp(WO->getBinaryOp(), RR);
    mergeInValue(&EVI, ValueLatticeElement::getRange(Res));
  } else {
    assert(Idx == 1 && "Index can only be 0 or 1");
    ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        WO->getBinaryOp(), RR, WO->getNoWrapKind());
    if (NWRegion.contains(LR))
      return (void)markConstant(&EVI, ConstantInt::getFalse(EVI.getType()));
    markOverdefined(&EVI);
  }
}

void SCCPInstVisitor::visitExtractValueInst(ExtractValueInst &EVI) {
  // If this returns a struct, mark all elements overdefined; we don't track
  // structs in structs.
  if (EVI.getType()->isStructTy())
    return (void)markOverdefined(&EVI);

  // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (ValueState[&EVI].isOverdefined())
    return (void)markOverdefined(&EVI);

  // If this is extracting from more than one level of struct, we don't know.
  if (EVI.getNumIndices() != 1)
    return (void)markOverdefined(&EVI);

  Value *AggVal = EVI.getAggregateOperand();
  if (AggVal->getType()->isStructTy()) {
    unsigned i = *EVI.idx_begin();
    if (auto *WO = dyn_cast<WithOverflowInst>(AggVal))
      return handleExtractOfWithOverflow(EVI, WO, i);
    ValueLatticeElement EltVal = getStructValueState(AggVal, i);
    mergeInValue(getValueState(&EVI), &EVI, EltVal);
  } else {
    // Otherwise, must be extracting from an array.
    return (void)markOverdefined(&EVI);
  }
}

void SCCPInstVisitor::visitInsertValueInst(InsertValueInst &IVI) {
  auto *STy = dyn_cast<StructType>(IVI.getType());
  if (!STy)
    return (void)markOverdefined(&IVI);

  // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (SCCPSolver::isOverdefined(ValueState[&IVI]))
    return (void)markOverdefined(&IVI);

  // If this has more than one index, we can't handle it; drive all results to
  // overdefined.
  if (IVI.getNumIndices() != 1)
    return (void)markOverdefined(&IVI);

  Value *Aggr = IVI.getAggregateOperand();
  unsigned Idx = *IVI.idx_begin();

  // Compute the result based on what we're inserting.
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
    // This passes through all values that aren't the inserted element.
    if (i != Idx) {
      ValueLatticeElement EltVal = getStructValueState(Aggr, i);
      mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
      continue;
    }

    Value *Val = IVI.getInsertedValueOperand();
    if (Val->getType()->isStructTy())
      // We don't track structs in structs.
      markOverdefined(getStructValueState(&IVI, i), &IVI);
    else {
      ValueLatticeElement InVal = getValueState(Val);
      mergeInValue(getStructValueState(&IVI, i), &IVI, InVal);
    }
  }
}

void SCCPInstVisitor::visitSelectInst(SelectInst &I) {
  // If this select returns a struct, just mark the result overdefined.
  // TODO: We could do a lot better than this if code actually uses this.
  if (I.getType()->isStructTy())
    return (void)markOverdefined(&I);

  // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (ValueState[&I].isOverdefined())
    return (void)markOverdefined(&I);

  ValueLatticeElement CondValue = getValueState(I.getCondition());
  if (CondValue.isUnknownOrUndef())
    return;

  if (ConstantInt *CondCB = getConstantInt(CondValue)) {
    Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
    mergeInValue(&I, getValueState(OpVal));
    return;
  }

  // Otherwise, the condition is overdefined or a constant we can't evaluate.
  // See if we can produce something better than overdefined based on the T/F
  // value.
  ValueLatticeElement TVal = getValueState(I.getTrueValue());
  ValueLatticeElement FVal = getValueState(I.getFalseValue());

  bool Changed = ValueState[&I].mergeIn(TVal);
  Changed |= ValueState[&I].mergeIn(FVal);
  if (Changed)
    pushToWorkListMsg(ValueState[&I], &I);
}

// Handle Unary Operators.
void SCCPInstVisitor::visitUnaryOperator(Instruction &I) {
  ValueLatticeElement V0State = getValueState(I.getOperand(0));

  ValueLatticeElement &IV = ValueState[&I];
  // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (SCCPSolver::isOverdefined(IV))
    return (void)markOverdefined(&I);

  // If something is unknown/undef, wait for it to resolve.
  if (V0State.isUnknownOrUndef())
    return;

  if (SCCPSolver::isConstant(V0State))
    if (Constant *C = ConstantFoldUnaryOpOperand(I.getOpcode(),
                                                 getConstant(V0State), DL))
      return (void)markConstant(IV, &I, C);

  markOverdefined(&I);
}

// Handle Binary Operators.
void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
  ValueLatticeElement V1State = getValueState(I.getOperand(0));
  ValueLatticeElement V2State = getValueState(I.getOperand(1));

  ValueLatticeElement &IV = ValueState[&I];
  if (IV.isOverdefined())
    return;

  // If something is undef, wait for it to resolve.
  if (V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef())
    return;

  if (V1State.isOverdefined() && V2State.isOverdefined())
    return (void)markOverdefined(&I);

  // If either of the operands is a constant, try to fold it to a constant.
  // TODO: Use information from notconstant better.
  if ((V1State.isConstant() || V2State.isConstant())) {
    Value *V1 = SCCPSolver::isConstant(V1State) ? getConstant(V1State)
                                                : I.getOperand(0);
    Value *V2 = SCCPSolver::isConstant(V2State) ? getConstant(V2State)
                                                : I.getOperand(1);
    Value *R = simplifyBinOp(I.getOpcode(), V1, V2, SimplifyQuery(DL));
    auto *C = dyn_cast_or_null<Constant>(R);
    if (C) {
      // Conservatively assume that the result may be based on operands that
      // may be undef. Note that we use mergeInValue to combine the constant
      // with the existing lattice value for I, as different constants might
      // be found after one of the operands goes to overdefined, e.g. due to
      // one operand being a special floating point value.
      ValueLatticeElement NewV;
      NewV.markConstant(C, /*MayIncludeUndef=*/true);
      return (void)mergeInValue(&I, NewV);
    }
  }

  // Only use ranges for binary operators on integers.
  if (!I.getType()->isIntegerTy())
    return markOverdefined(&I);

  // Try to simplify to a constant range.
  ConstantRange A = getConstantRange(V1State, I.getType());
  ConstantRange B = getConstantRange(V2State, I.getType());
  ConstantRange R = A.binaryOp(cast<BinaryOperator>(&I)->getOpcode(), B);
  mergeInValue(&I, ValueLatticeElement::getRange(R));

  // TODO: Currently we do not exploit special values that produce something
  // better than overdefined with an overdefined operand for vector or floating
  // point types, like and <4 x i32> overdefined, zeroinitializer.
}

// Handle CmpInst instructions.
void SCCPInstVisitor::visitCmpInst(CmpInst &I) {
  // Do not cache this lookup: getValueState calls later in the function might
  // invalidate the reference.
  if (SCCPSolver::isOverdefined(ValueState[&I]))
    return (void)markOverdefined(&I);

  Value *Op1 = I.getOperand(0);
  Value *Op2 = I.getOperand(1);

  // For parameters, use ParamState, which includes constant range info if
  // available.
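  //
  // Illustrative example (hypothetical lattice states): if Op1 is known to be
  // in the range [0, 10) and Op2 is the constant 20, getCompare below can fold
  // an `icmp ult Op1, Op2` to true even though neither operand is a single
  // constant.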
  auto V1State = getValueState(Op1);
  auto V2State = getValueState(Op2);

  Constant *C = V1State.getCompare(I.getPredicate(), I.getType(), V2State, DL);
  if (C) {
    ValueLatticeElement CV;
    CV.markConstant(C);
    mergeInValue(&I, CV);
    return;
  }

  // If operands are still unknown, wait for them to resolve.
  if ((V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef()) &&
      !SCCPSolver::isConstant(ValueState[&I]))
    return;

  markOverdefined(&I);
}

// Handle getelementptr instructions. If all operands are constants then we
// can turn this into a getelementptr ConstantExpr.
void SCCPInstVisitor::visitGetElementPtrInst(GetElementPtrInst &I) {
  if (SCCPSolver::isOverdefined(ValueState[&I]))
    return (void)markOverdefined(&I);

  SmallVector<Constant *, 8> Operands;
  Operands.reserve(I.getNumOperands());

  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    ValueLatticeElement State = getValueState(I.getOperand(i));
    if (State.isUnknownOrUndef())
      return; // Operands are not resolved yet.

    if (SCCPSolver::isOverdefined(State))
      return (void)markOverdefined(&I);

    if (Constant *C = getConstant(State)) {
      Operands.push_back(C);
      continue;
    }

    return (void)markOverdefined(&I);
  }

  Constant *Ptr = Operands[0];
  auto Indices = ArrayRef(Operands.begin() + 1, Operands.end());
  Constant *C =
      ConstantExpr::getGetElementPtr(I.getSourceElementType(), Ptr, Indices);
  markConstant(&I, C);
}

void SCCPInstVisitor::visitStoreInst(StoreInst &SI) {
  // If this store is of a struct, ignore it.
  if (SI.getOperand(0)->getType()->isStructTy())
    return;

  if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
    return;

  GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
  auto I = TrackedGlobals.find(GV);
  if (I == TrackedGlobals.end())
    return;

  // Get the value we are storing into the global, then merge it.
  mergeInValue(I->second, GV, getValueState(SI.getOperand(0)),
               ValueLatticeElement::MergeOptions().setCheckWiden(false));
  if (I->second.isOverdefined())
    TrackedGlobals.erase(I); // No need to keep tracking this!
}

static ValueLatticeElement getValueFromMetadata(const Instruction *I) {
  if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range))
    if (I->getType()->isIntegerTy())
      return ValueLatticeElement::getRange(
          getConstantRangeFromMetadata(*Ranges));
  if (I->hasMetadata(LLVMContext::MD_nonnull))
    return ValueLatticeElement::getNot(
        ConstantPointerNull::get(cast<PointerType>(I->getType())));
  return ValueLatticeElement::getOverdefined();
}

// Handle load instructions. If the operand is a constant pointer to a constant
// global, we can replace the load with the loaded constant value!
void SCCPInstVisitor::visitLoadInst(LoadInst &I) {
  // If this load is of a struct or the load is volatile, just mark the result
  // as overdefined.
  if (I.getType()->isStructTy() || I.isVolatile())
    return (void)markOverdefined(&I);

  // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (ValueState[&I].isOverdefined())
    return (void)markOverdefined(&I);

  ValueLatticeElement PtrVal = getValueState(I.getOperand(0));
  if (PtrVal.isUnknownOrUndef())
    return; // The pointer is not resolved yet!

  ValueLatticeElement &IV = ValueState[&I];

  if (SCCPSolver::isConstant(PtrVal)) {
    Constant *Ptr = getConstant(PtrVal);

    // load null is undefined.
    if (isa<ConstantPointerNull>(Ptr)) {
      if (NullPointerIsDefined(I.getFunction(), I.getPointerAddressSpace()))
        return (void)markOverdefined(IV, &I);
      else
        return;
    }

    // Transform load (constant global) into the value loaded.
    if (auto *GV = dyn_cast<GlobalVariable>(Ptr)) {
      if (!TrackedGlobals.empty()) {
        // If we are tracking this global, merge in the known value for it.
        auto It = TrackedGlobals.find(GV);
        if (It != TrackedGlobals.end()) {
          mergeInValue(IV, &I, It->second, getMaxWidenStepsOpts());
          return;
        }
      }
    }

    // Transform load from a constant into a constant if possible.
    if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, I.getType(), DL))
      return (void)markConstant(IV, &I, C);
  }

  // Fall back to metadata.
  mergeInValue(&I, getValueFromMetadata(&I));
}

void SCCPInstVisitor::visitCallBase(CallBase &CB) {
  handleCallResult(CB);
  handleCallArguments(CB);
}

void SCCPInstVisitor::handleCallOverdefined(CallBase &CB) {
  Function *F = CB.getCalledFunction();

  // Void return and not tracking callee, just bail.
  if (CB.getType()->isVoidTy())
    return;

  // Always mark struct return as overdefined.
  if (CB.getType()->isStructTy())
    return (void)markOverdefined(&CB);

  // Otherwise, if we have a single return value case, and if the function is
  // a declaration, maybe we can constant fold it.
  if (F && F->isDeclaration() && canConstantFoldCallTo(&CB, F)) {
    SmallVector<Constant *, 8> Operands;
    for (const Use &A : CB.args()) {
      if (A.get()->getType()->isStructTy())
        return markOverdefined(&CB); // Can't handle struct args.
      if (A.get()->getType()->isMetadataTy())
        continue; // Carried in CB, not allowed in Operands.
      ValueLatticeElement State = getValueState(A);

      if (State.isUnknownOrUndef())
        return; // Operands are not resolved yet.
      if (SCCPSolver::isOverdefined(State))
        return (void)markOverdefined(&CB);
      assert(SCCPSolver::isConstant(State) && "Unknown state!");
      Operands.push_back(getConstant(State));
    }

    if (SCCPSolver::isOverdefined(getValueState(&CB)))
      return (void)markOverdefined(&CB);

    // If we can constant fold this, mark the result of the call as a
    // constant.
    if (Constant *C = ConstantFoldCall(&CB, F, Operands, &GetTLI(*F)))
      return (void)markConstant(&CB, C);
  }

  // Fall back to metadata.
  mergeInValue(&CB, getValueFromMetadata(&CB));
}

void SCCPInstVisitor::handleCallArguments(CallBase &CB) {
  Function *F = CB.getCalledFunction();
  // If this is a local function that doesn't have its address taken, mark its
  // entry block executable and merge the actual arguments at the call site
  // into the formal arguments of the function.
  if (TrackingIncomingArguments.count(F)) {
    markBlockExecutable(&F->front());

    // Propagate information from this call site into the callee.
    auto CAI = CB.arg_begin();
    for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
         ++AI, ++CAI) {
      // If this argument is byval, and if the function is not readonly, there
      // will be an implicit copy formed of the input aggregate.
      if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
        markOverdefined(&*AI);
        continue;
      }

      if (auto *STy = dyn_cast<StructType>(AI->getType())) {
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          ValueLatticeElement CallArg = getStructValueState(*CAI, i);
          mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg,
                       getMaxWidenStepsOpts());
        }
      } else
        mergeInValue(&*AI, getValueState(*CAI), getMaxWidenStepsOpts());
    }
  }
}

void SCCPInstVisitor::handleCallResult(CallBase &CB) {
  Function *F = CB.getCalledFunction();

  if (auto *II = dyn_cast<IntrinsicInst>(&CB)) {
    if (II->getIntrinsicID() == Intrinsic::ssa_copy) {
      if (ValueState[&CB].isOverdefined())
        return;

      Value *CopyOf = CB.getOperand(0);
      ValueLatticeElement CopyOfVal = getValueState(CopyOf);
      const auto *PI = getPredicateInfoFor(&CB);
      assert(PI && "Missing predicate info for ssa.copy");

      const std::optional<PredicateConstraint> &Constraint =
          PI->getConstraint();
      if (!Constraint) {
        mergeInValue(ValueState[&CB], &CB, CopyOfVal);
        return;
      }

      CmpInst::Predicate Pred = Constraint->Predicate;
      Value *OtherOp = Constraint->OtherOp;

      // Wait until OtherOp is resolved.
      if (getValueState(OtherOp).isUnknown()) {
        addAdditionalUser(OtherOp, &CB);
        return;
      }

      ValueLatticeElement CondVal = getValueState(OtherOp);
      ValueLatticeElement &IV = ValueState[&CB];
      if (CondVal.isConstantRange() || CopyOfVal.isConstantRange()) {
        auto ImposedCR =
            ConstantRange::getFull(DL.getTypeSizeInBits(CopyOf->getType()));

        // Get the range imposed by the condition.
        if (CondVal.isConstantRange())
          ImposedCR = ConstantRange::makeAllowedICmpRegion(
              Pred, CondVal.getConstantRange());

        // Combine range info for the original value with the new range from
        // the condition.
        auto CopyOfCR = getConstantRange(CopyOfVal, CopyOf->getType());
        auto NewCR = ImposedCR.intersectWith(CopyOfCR);
        // If the existing information is != x, do not use the information from
        // a chained predicate, as the != x information is more likely to be
        // helpful in practice.
        if (!CopyOfCR.contains(NewCR) && CopyOfCR.getSingleMissingElement())
          NewCR = CopyOfCR;

        // The new range is based on a branch condition. That guarantees that
        // neither of the compare operands can be undef in the branch targets,
        // unless we have conditions that are always true/false (e.g. icmp ule
        // i32, %a, i32_max). For the latter, an overdefined/empty range will
        // be inferred, but the branch will get folded accordingly anyways.
        addAdditionalUser(OtherOp, &CB);
        mergeInValue(
            IV, &CB,
            ValueLatticeElement::getRange(NewCR, /*MayIncludeUndef*/ false));
        return;
      } else if (Pred == CmpInst::ICMP_EQ &&
                 (CondVal.isConstant() || CondVal.isNotConstant())) {
        // For non-integer values or integer constant expressions, only
        // propagate equal constants or not-constants.
        addAdditionalUser(OtherOp, &CB);
        mergeInValue(IV, &CB, CondVal);
        return;
      } else if (Pred == CmpInst::ICMP_NE && CondVal.isConstant()) {
        // Propagate inequalities.
        addAdditionalUser(OtherOp, &CB);
        mergeInValue(IV, &CB,
                     ValueLatticeElement::getNot(CondVal.getConstant()));
        return;
      }

      return (void)mergeInValue(IV, &CB, CopyOfVal);
    }

    if (ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
      // Compute result range for intrinsics supported by ConstantRange.
      // Do this even if we don't know a range for all operands, as we may
      // still know something about the result range, e.g. of abs(x).
      SmallVector<ConstantRange, 2> OpRanges;
      for (Value *Op : II->args()) {
        const ValueLatticeElement &State = getValueState(Op);
        OpRanges.push_back(getConstantRange(State, Op->getType()));
      }

      ConstantRange Result =
          ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges);
      return (void)mergeInValue(II, ValueLatticeElement::getRange(Result));
    }
  }

  // The common case is that we aren't tracking the callee, either because we
  // are not doing interprocedural analysis or the callee is indirect, or is
  // external. Handle these cases first.
  if (!F || F->isDeclaration())
    return handleCallOverdefined(CB);

  // If this is a single/zero retval case, see if we're tracking the function.
  if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
    if (!MRVFunctionsTracked.count(F))
      return handleCallOverdefined(CB); // Not tracking this callee.

    // If we are tracking this callee, propagate the result of the function
    // into this call site.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
      mergeInValue(getStructValueState(&CB, i), &CB,
                   TrackedMultipleRetVals[std::make_pair(F, i)],
                   getMaxWidenStepsOpts());
  } else {
    auto TFRVI = TrackedRetVals.find(F);
    if (TFRVI == TrackedRetVals.end())
      return handleCallOverdefined(CB); // Not tracking this callee.

    // If so, propagate the return value of the callee into this call result.
    mergeInValue(&CB, TFRVI->second, getMaxWidenStepsOpts());
  }
}

void SCCPInstVisitor::solve() {
  // Process the work lists until they are empty!
  while (!BBWorkList.empty() || !InstWorkList.empty() ||
         !OverdefinedInstWorkList.empty()) {
    // Process the overdefined instruction's work list first, which drives
    // other things to overdefined more quickly.
    while (!OverdefinedInstWorkList.empty()) {
      Value *I = OverdefinedInstWorkList.pop_back_val();

      LLVM_DEBUG(dbgs() << "\nPopped off OI-WL: " << *I << '\n');

      // "I" got into the work list because it either made the transition from
      // bottom to constant, or to overdefined.
      //
      // Anything on this worklist that is overdefined need not be visited
      // since all of its users will have already been marked as overdefined.
      // Update all of the users of this instruction's value.
      //
      markUsersAsChanged(I);
    }

    // Process the instruction work list.
    while (!InstWorkList.empty()) {
      Value *I = InstWorkList.pop_back_val();

      LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');

      // "I" got into the work list because it made the transition from undef
      // to constant.
      //
      // Anything on this worklist that is overdefined need not be visited
      // since all of its users will have already been marked as overdefined.
      // Update all of the users of this instruction's value.
      //
      if (I->getType()->isStructTy() || !getValueState(I).isOverdefined())
        markUsersAsChanged(I);
    }

    // Process the basic block work list.
    while (!BBWorkList.empty()) {
      BasicBlock *BB = BBWorkList.pop_back_val();

      LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');

      // Notify all instructions in this basic block that they are newly
      // executable.
      visit(BB);
    }
  }
}

/// While solving the dataflow for a function, we don't compute a result for
/// operations with an undef operand, to allow undef to be lowered to a
/// constant later. For example, constant folding of "zext i8 undef to i16"
/// would result in "i16 0", and if undef is later lowered to "i8 1", then the
/// zext result would become "i16 1" and would result in an overdefined
/// lattice value once merged with the previous result. Not computing the
/// result of the zext (treating undef the same as unknown) allows us to
/// handle a later undef->constant lowering more optimally.
///
/// However, if the operand remains undef when the solver returns, we do need
/// to assign some result to the instruction (otherwise we would treat it as
/// unreachable). For simplicity, we mark any instructions that are still
/// unknown as overdefined.
bool SCCPInstVisitor::resolvedUndefsIn(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    if (!BBExecutable.count(&BB))
      continue;

    for (Instruction &I : BB) {
      // Look for instructions which produce undef values.
      if (I.getType()->isVoidTy())
        continue;

      if (auto *STy = dyn_cast<StructType>(I.getType())) {
        // Only a few things that can be structs matter for undef.

        // Tracked calls must never be marked overdefined in resolvedUndefsIn.
        if (auto *CB = dyn_cast<CallBase>(&I))
          if (Function *F = CB->getCalledFunction())
            if (MRVFunctionsTracked.count(F))
              continue;

        // extractvalue and insertvalue don't need to be marked; they are
        // tracked as precisely as their operands.
        if (isa<ExtractValueInst>(I) || isa<InsertValueInst>(I))
          continue;
        // Send the results of everything else to overdefined. We could be
        // more precise than this but it isn't worth bothering.
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          ValueLatticeElement &LV = getStructValueState(&I, i);
          if (LV.isUnknown()) {
            markOverdefined(LV, &I);
            MadeChange = true;
          }
        }
        continue;
      }

      ValueLatticeElement &LV = getValueState(&I);
      if (!LV.isUnknown())
        continue;

      // There are two reasons a call can have an undef result:
      // 1. It could be tracked.
      // 2. It could be constant-foldable.
      // Because of the way we solve return values, tracked calls must
      // never be marked overdefined in resolvedUndefsIn.
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (Function *F = CB->getCalledFunction())
          if (TrackedRetVals.count(F))
            continue;

      if (isa<LoadInst>(I)) {
        // A load here means one of two things: a load of undef from a global
        // or a load from an unknown pointer. Either way, having it return
        // undef is okay.
        continue;
      }

      markOverdefined(&I);
      MadeChange = true;
    }
  }

  LLVM_DEBUG(if (MadeChange) dbgs()
             << "\nResolved undefs in " << F.getName() << '\n');

  return MadeChange;
}

//===----------------------------------------------------------------------===//
//
// SCCPSolver implementations
//
SCCPSolver::SCCPSolver(
    const DataLayout &DL,
    std::function<const TargetLibraryInfo &(Function &)> GetTLI,
    LLVMContext &Ctx)
    : Visitor(new SCCPInstVisitor(DL, std::move(GetTLI), Ctx)) {}

SCCPSolver::~SCCPSolver() = default;

void SCCPSolver::addAnalysis(Function &F, AnalysisResultsForFn A) {
  return Visitor->addAnalysis(F, std::move(A));
}

bool SCCPSolver::markBlockExecutable(BasicBlock *BB) {
  return Visitor->markBlockExecutable(BB);
}

const PredicateBase *SCCPSolver::getPredicateInfoFor(Instruction *I) {
  return Visitor->getPredicateInfoFor(I);
}

const LoopInfo &SCCPSolver::getLoopInfo(Function &F) {
  return Visitor->getLoopInfo(F);
}

DomTreeUpdater SCCPSolver::getDTU(Function &F) { return Visitor->getDTU(F); }

void SCCPSolver::trackValueOfGlobalVariable(GlobalVariable *GV) {
  Visitor->trackValueOfGlobalVariable(GV);
}

void SCCPSolver::addTrackedFunction(Function *F) {
  Visitor->addTrackedFunction(F);
}

void SCCPSolver::addToMustPreserveReturnsInFunctions(Function *F) {
  Visitor->addToMustPreserveReturnsInFunctions(F);
}

bool SCCPSolver::mustPreserveReturn(Function *F) {
  return Visitor->mustPreserveReturn(F);
}

void SCCPSolver::addArgumentTrackedFunction(Function *F) {
  Visitor->addArgumentTrackedFunction(F);
}

bool SCCPSolver::isArgumentTrackedFunction(Function *F) {
  return Visitor->isArgumentTrackedFunction(F);
}

void SCCPSolver::solve() { Visitor->solve(); }

bool SCCPSolver::resolvedUndefsIn(Function &F) {
  return Visitor->resolvedUndefsIn(F);
}

void SCCPSolver::solveWhileResolvedUndefsIn(Module &M) {
  Visitor->solveWhileResolvedUndefsIn(M);
}

void
SCCPSolver::solveWhileResolvedUndefsIn(SmallVectorImpl<Function *> &WorkList) {
  Visitor->solveWhileResolvedUndefsIn(WorkList);
}

bool SCCPSolver::isBlockExecutable(BasicBlock *BB) const {
  return Visitor->isBlockExecutable(BB);
}

bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) const {
  return Visitor->isEdgeFeasible(From, To);
}

std::vector<ValueLatticeElement>
SCCPSolver::getStructLatticeValueFor(Value *V) const {
  return Visitor->getStructLatticeValueFor(V);
}

void SCCPSolver::removeLatticeValueFor(Value *V) {
  return Visitor->removeLatticeValueFor(V);
}

const ValueLatticeElement &SCCPSolver::getLatticeValueFor(Value *V) const {
  return Visitor->getLatticeValueFor(V);
}

const MapVector<Function *, ValueLatticeElement> &
SCCPSolver::getTrackedRetVals() {
  return Visitor->getTrackedRetVals();
}

const DenseMap<GlobalVariable *, ValueLatticeElement> &
SCCPSolver::getTrackedGlobals() {
  return Visitor->getTrackedGlobals();
}

const SmallPtrSet<Function *, 16> SCCPSolver::getMRVFunctionsTracked() {
  return Visitor->getMRVFunctionsTracked();
}

void SCCPSolver::markOverdefined(Value *V) {
  Visitor->markOverdefined(V);
}

bool SCCPSolver::isStructLatticeConstant(Function *F, StructType *STy) {
  return Visitor->isStructLatticeConstant(F, STy);
}

Constant *SCCPSolver::getConstant(const ValueLatticeElement &LV) const {
  return Visitor->getConstant(LV);
}

SmallPtrSetImpl<Function *> &SCCPSolver::getArgumentTrackedFunctions() {
  return Visitor->getArgumentTrackedFunctions();
}

void SCCPSolver::markArgInFuncSpecialization(
    Function *F, const SmallVectorImpl<ArgInfo> &Args) {
  Visitor->markArgInFuncSpecialization(F, Args);
}

void SCCPSolver::markFunctionUnreachable(Function *F) {
  Visitor->markFunctionUnreachable(F);
}

void SCCPSolver::visit(Instruction *I) { Visitor->visit(I); }

void SCCPSolver::visitCall(CallInst &I) { Visitor->visitCall(I); }