1 //===- SCCPSolver.cpp - SCCP Utility --------------------------- *- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // \file
10 // This file implements the Sparse Conditional Constant Propagation (SCCP)
11 // utility.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "llvm/Transforms/Utils/SCCPSolver.h"
16 #include "llvm/Analysis/ConstantFolding.h"
17 #include "llvm/Analysis/InstructionSimplify.h"
18 #include "llvm/Analysis/ValueLattice.h"
19 #include "llvm/Analysis/ValueLatticeUtils.h"
20 #include "llvm/Analysis/ValueTracking.h"
21 #include "llvm/IR/InstVisitor.h"
22 #include "llvm/Support/Casting.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Support/ErrorHandling.h"
25 #include "llvm/Support/raw_ostream.h"
26 #include "llvm/Transforms/Utils/Local.h"
27 #include <cassert>
28 #include <utility>
29 #include <vector>
30
31 using namespace llvm;
32
33 #define DEBUG_TYPE "sccp"
34
35 // The maximum number of range extensions allowed for operations requiring
36 // widening.
37 static const unsigned MaxNumRangeExtensions = 10;
38
39 /// Returns MergeOptions with MaxWidenSteps set to MaxNumRangeExtensions.
static ValueLatticeElement::MergeOptions getMaxWidenStepsOpts() {
41 return ValueLatticeElement::MergeOptions().setMaxWidenSteps(
42 MaxNumRangeExtensions);
43 }
44
45 namespace llvm {
46
bool SCCPSolver::isConstant(const ValueLatticeElement &LV) {
48 return LV.isConstant() ||
49 (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
50 }
51
bool SCCPSolver::isOverdefined(const ValueLatticeElement &LV) {
53 return !LV.isUnknownOrUndef() && !SCCPSolver::isConstant(LV);
54 }
55
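/// Return true if \p I can be deleted once its result has been replaced by a
/// constant: it is either trivially dead or a load (see the TODO below
/// regarding atomic loads of constant globals).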
static bool canRemoveInstruction(Instruction *I) {
57 if (wouldInstructionBeTriviallyDead(I))
58 return true;
59
60 // Some instructions can be handled but are rejected above. Catch
61 // those cases by falling through to here.
62 // TODO: Mark globals as being constant earlier, so
63 // TODO: wouldInstructionBeTriviallyDead() knows that atomic loads
64 // TODO: are safe to remove.
65 return isa<LoadInst>(I);
66 }
67
bool SCCPSolver::tryToReplaceWithConstant(Value *V) {
69 Constant *Const = getConstantOrNull(V);
70 if (!Const)
71 return false;
  // Replacing a `musttail` call with a constant breaks the `musttail`
  // invariant unless the call itself can be removed.
74 // Calls with "clang.arc.attachedcall" implicitly use the return value and
75 // those uses cannot be updated with a constant.
76 CallBase *CB = dyn_cast<CallBase>(V);
77 if (CB && ((CB->isMustTailCall() &&
78 !canRemoveInstruction(CB)) ||
79 CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall))) {
80 Function *F = CB->getCalledFunction();
81
82 // Don't zap returns of the callee
83 if (F)
84 addToMustPreserveReturnsInFunctions(F);
85
86 LLVM_DEBUG(dbgs() << " Can\'t treat the result of call " << *CB
87 << " as a constant\n");
88 return false;
89 }
90
91 LLVM_DEBUG(dbgs() << " Constant: " << *Const << " = " << *V << '\n');
92
93 // Replaces all of the uses of a variable with uses of the constant.
94 V->replaceAllUsesWith(Const);
95 return true;
96 }
97
/// Try to use \p Inst's value range from \p Solver to infer the no-wrap flags
/// (nuw/nsw) and the non-negative (nneg) flag.
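/// For example (illustrative): if the solver proved the first operand of
/// "add i8 %x, 1" lies in [0, 100], the guaranteed no-wrap region check below
/// lets us add both nuw and nsw, since adding 1 cannot wrap [0, 100] in
/// either the unsigned or the signed sense for i8.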
static bool refineInstruction(SCCPSolver &Solver,
100 const SmallPtrSetImpl<Value *> &InsertedValues,
101 Instruction &Inst) {
102 bool Changed = false;
103 auto GetRange = [&Solver, &InsertedValues](Value *Op) {
104 if (auto *Const = dyn_cast<Constant>(Op))
105 return Const->toConstantRange();
106 if (InsertedValues.contains(Op)) {
107 unsigned Bitwidth = Op->getType()->getScalarSizeInBits();
108 return ConstantRange::getFull(Bitwidth);
109 }
110 return Solver.getLatticeValueFor(Op).asConstantRange(
111 Op->getType(), /*UndefAllowed=*/false);
112 };
113
114 if (isa<OverflowingBinaryOperator>(Inst)) {
115 if (Inst.hasNoSignedWrap() && Inst.hasNoUnsignedWrap())
116 return false;
117
118 auto RangeA = GetRange(Inst.getOperand(0));
119 auto RangeB = GetRange(Inst.getOperand(1));
120 if (!Inst.hasNoUnsignedWrap()) {
121 auto NUWRange = ConstantRange::makeGuaranteedNoWrapRegion(
122 Instruction::BinaryOps(Inst.getOpcode()), RangeB,
123 OverflowingBinaryOperator::NoUnsignedWrap);
124 if (NUWRange.contains(RangeA)) {
125 Inst.setHasNoUnsignedWrap();
126 Changed = true;
127 }
128 }
129 if (!Inst.hasNoSignedWrap()) {
130 auto NSWRange = ConstantRange::makeGuaranteedNoWrapRegion(
131 Instruction::BinaryOps(Inst.getOpcode()), RangeB,
132 OverflowingBinaryOperator::NoSignedWrap);
133 if (NSWRange.contains(RangeA)) {
134 Inst.setHasNoSignedWrap();
135 Changed = true;
136 }
137 }
138 } else if (isa<PossiblyNonNegInst>(Inst) && !Inst.hasNonNeg()) {
139 auto Range = GetRange(Inst.getOperand(0));
140 if (Range.isAllNonNegative()) {
141 Inst.setNonNeg();
142 Changed = true;
143 }
144 } else if (TruncInst *TI = dyn_cast<TruncInst>(&Inst)) {
145 if (TI->hasNoSignedWrap() && TI->hasNoUnsignedWrap())
146 return false;
147
148 auto Range = GetRange(Inst.getOperand(0));
149 uint64_t DestWidth = TI->getDestTy()->getScalarSizeInBits();
150 if (!TI->hasNoUnsignedWrap()) {
151 if (Range.getActiveBits() <= DestWidth) {
152 TI->setHasNoUnsignedWrap(true);
153 Changed = true;
154 }
155 }
156 if (!TI->hasNoSignedWrap()) {
157 if (Range.getMinSignedBits() <= DestWidth) {
158 TI->setHasNoSignedWrap(true);
159 Changed = true;
160 }
161 }
162 }
163
164 return Changed;
165 }
166
167 /// Try to replace signed instructions with their unsigned equivalent.
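/// For example (illustrative): "sext i8 %x to i32" becomes
/// "zext nneg i8 %x to i32" when %x is known non-negative, and "sdiv %x, %y"
/// becomes "udiv %x, %y" when both operands are known non-negative.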
static bool replaceSignedInst(SCCPSolver &Solver,
169 SmallPtrSetImpl<Value *> &InsertedValues,
170 Instruction &Inst) {
171 // Determine if a signed value is known to be >= 0.
172 auto isNonNegative = [&Solver](Value *V) {
173 // If this value was constant-folded, it may not have a solver entry.
174 // Handle integers. Otherwise, return false.
175 if (auto *C = dyn_cast<Constant>(V)) {
176 auto *CInt = dyn_cast<ConstantInt>(C);
177 return CInt && !CInt->isNegative();
178 }
179 const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
180 return IV.isConstantRange(/*UndefAllowed=*/false) &&
181 IV.getConstantRange().isAllNonNegative();
182 };
183
184 Instruction *NewInst = nullptr;
185 switch (Inst.getOpcode()) {
186 case Instruction::SIToFP:
187 case Instruction::SExt: {
188 // If the source value is not negative, this is a zext/uitofp.
189 Value *Op0 = Inst.getOperand(0);
190 if (InsertedValues.count(Op0) || !isNonNegative(Op0))
191 return false;
192 NewInst = CastInst::Create(Inst.getOpcode() == Instruction::SExt
193 ? Instruction::ZExt
194 : Instruction::UIToFP,
195 Op0, Inst.getType(), "", Inst.getIterator());
196 NewInst->setNonNeg();
197 break;
198 }
199 case Instruction::AShr: {
200 // If the shifted value is not negative, this is a logical shift right.
201 Value *Op0 = Inst.getOperand(0);
202 if (InsertedValues.count(Op0) || !isNonNegative(Op0))
203 return false;
    NewInst = BinaryOperator::CreateLShr(Op0, Inst.getOperand(1), "",
                                         Inst.getIterator());
205 NewInst->setIsExact(Inst.isExact());
206 break;
207 }
208 case Instruction::SDiv:
209 case Instruction::SRem: {
210 // If both operands are not negative, this is the same as udiv/urem.
211 Value *Op0 = Inst.getOperand(0), *Op1 = Inst.getOperand(1);
212 if (InsertedValues.count(Op0) || InsertedValues.count(Op1) ||
213 !isNonNegative(Op0) || !isNonNegative(Op1))
214 return false;
215 auto NewOpcode = Inst.getOpcode() == Instruction::SDiv ? Instruction::UDiv
216 : Instruction::URem;
    NewInst = BinaryOperator::Create(NewOpcode, Op0, Op1, "",
                                     Inst.getIterator());
218 if (Inst.getOpcode() == Instruction::SDiv)
219 NewInst->setIsExact(Inst.isExact());
220 break;
221 }
222 default:
223 return false;
224 }
225
226 // Wire up the new instruction and update state.
227 assert(NewInst && "Expected replacement instruction");
228 NewInst->takeName(&Inst);
229 InsertedValues.insert(NewInst);
230 Inst.replaceAllUsesWith(NewInst);
231 NewInst->setDebugLoc(Inst.getDebugLoc());
232 Solver.removeLatticeValueFor(&Inst);
233 Inst.eraseFromParent();
234 return true;
235 }
236
bool SCCPSolver::simplifyInstsInBlock(BasicBlock &BB,
238 SmallPtrSetImpl<Value *> &InsertedValues,
239 Statistic &InstRemovedStat,
240 Statistic &InstReplacedStat) {
241 bool MadeChanges = false;
242 for (Instruction &Inst : make_early_inc_range(BB)) {
243 if (Inst.getType()->isVoidTy())
244 continue;
245 if (tryToReplaceWithConstant(&Inst)) {
246 if (canRemoveInstruction(&Inst))
247 Inst.eraseFromParent();
248
249 MadeChanges = true;
250 ++InstRemovedStat;
251 } else if (replaceSignedInst(*this, InsertedValues, Inst)) {
252 MadeChanges = true;
253 ++InstReplacedStat;
254 } else if (refineInstruction(*this, InsertedValues, Inst)) {
255 MadeChanges = true;
256 }
257 }
258 return MadeChanges;
259 }
260
bool SCCPSolver::removeNonFeasibleEdges(BasicBlock *BB, DomTreeUpdater &DTU,
262 BasicBlock *&NewUnreachableBB) const {
263 SmallPtrSet<BasicBlock *, 8> FeasibleSuccessors;
264 bool HasNonFeasibleEdges = false;
265 for (BasicBlock *Succ : successors(BB)) {
266 if (isEdgeFeasible(BB, Succ))
267 FeasibleSuccessors.insert(Succ);
268 else
269 HasNonFeasibleEdges = true;
270 }
271
272 // All edges feasible, nothing to do.
273 if (!HasNonFeasibleEdges)
274 return false;
275
276 // SCCP can only determine non-feasible edges for br, switch and indirectbr.
277 Instruction *TI = BB->getTerminator();
278 assert((isa<BranchInst>(TI) || isa<SwitchInst>(TI) ||
279 isa<IndirectBrInst>(TI)) &&
280 "Terminator must be a br, switch or indirectbr");
281
282 if (FeasibleSuccessors.size() == 0) {
283 // Branch on undef/poison, replace with unreachable.
284 SmallPtrSet<BasicBlock *, 8> SeenSuccs;
285 SmallVector<DominatorTree::UpdateType, 8> Updates;
286 for (BasicBlock *Succ : successors(BB)) {
287 Succ->removePredecessor(BB);
288 if (SeenSuccs.insert(Succ).second)
289 Updates.push_back({DominatorTree::Delete, BB, Succ});
290 }
291 TI->eraseFromParent();
292 new UnreachableInst(BB->getContext(), BB);
293 DTU.applyUpdatesPermissive(Updates);
294 } else if (FeasibleSuccessors.size() == 1) {
295 // Replace with an unconditional branch to the only feasible successor.
296 BasicBlock *OnlyFeasibleSuccessor = *FeasibleSuccessors.begin();
297 SmallVector<DominatorTree::UpdateType, 8> Updates;
298 bool HaveSeenOnlyFeasibleSuccessor = false;
299 for (BasicBlock *Succ : successors(BB)) {
300 if (Succ == OnlyFeasibleSuccessor && !HaveSeenOnlyFeasibleSuccessor) {
301 // Don't remove the edge to the only feasible successor the first time
302 // we see it. We still do need to remove any multi-edges to it though.
303 HaveSeenOnlyFeasibleSuccessor = true;
304 continue;
305 }
306
307 Succ->removePredecessor(BB);
308 Updates.push_back({DominatorTree::Delete, BB, Succ});
309 }
310
311 Instruction *BI = BranchInst::Create(OnlyFeasibleSuccessor, BB);
312 BI->setDebugLoc(TI->getDebugLoc());
313 TI->eraseFromParent();
314 DTU.applyUpdatesPermissive(Updates);
315 } else if (FeasibleSuccessors.size() > 1) {
316 SwitchInstProfUpdateWrapper SI(*cast<SwitchInst>(TI));
317 SmallVector<DominatorTree::UpdateType, 8> Updates;
318
319 // If the default destination is unfeasible it will never be taken. Replace
320 // it with a new block with a single Unreachable instruction.
321 BasicBlock *DefaultDest = SI->getDefaultDest();
322 if (!FeasibleSuccessors.contains(DefaultDest)) {
323 if (!NewUnreachableBB) {
324 NewUnreachableBB =
325 BasicBlock::Create(DefaultDest->getContext(), "default.unreachable",
326 DefaultDest->getParent(), DefaultDest);
327 new UnreachableInst(DefaultDest->getContext(), NewUnreachableBB);
328 }
329
330 DefaultDest->removePredecessor(BB);
331 SI->setDefaultDest(NewUnreachableBB);
332 Updates.push_back({DominatorTree::Delete, BB, DefaultDest});
333 Updates.push_back({DominatorTree::Insert, BB, NewUnreachableBB});
334 }
335
336 for (auto CI = SI->case_begin(); CI != SI->case_end();) {
337 if (FeasibleSuccessors.contains(CI->getCaseSuccessor())) {
338 ++CI;
339 continue;
340 }
341
342 BasicBlock *Succ = CI->getCaseSuccessor();
343 Succ->removePredecessor(BB);
344 Updates.push_back({DominatorTree::Delete, BB, Succ});
345 SI.removeCase(CI);
346 // Don't increment CI, as we removed a case.
347 }
348
349 DTU.applyUpdatesPermissive(Updates);
350 } else {
351 llvm_unreachable("Must have at least one feasible successor");
352 }
353 return true;
354 }
355
356 /// Helper class for SCCPSolver. This implements the instruction visitor and
357 /// holds all the state.
358 class SCCPInstVisitor : public InstVisitor<SCCPInstVisitor> {
359 const DataLayout &DL;
360 std::function<const TargetLibraryInfo &(Function &)> GetTLI;
361 SmallPtrSet<BasicBlock *, 8> BBExecutable; // The BBs that are executable.
362 DenseMap<Value *, ValueLatticeElement>
363 ValueState; // The state each value is in.
364
365 /// StructValueState - This maintains ValueState for values that have
366 /// StructType, for example for formal arguments, calls, insertelement, etc.
367 DenseMap<std::pair<Value *, unsigned>, ValueLatticeElement> StructValueState;
368
  /// TrackedGlobals - If we are tracking any values for the contents of a
  /// global variable, we keep a mapping from the constant accessor to the
  /// element of the global, to the currently known value. If the value
  /// becomes overdefined, its entry is simply removed from this map.
373 DenseMap<GlobalVariable *, ValueLatticeElement> TrackedGlobals;
374
375 /// TrackedRetVals - If we are tracking arguments into and the return
376 /// value out of a function, it will have an entry in this map, indicating
377 /// what the known return value for the function is.
378 MapVector<Function *, ValueLatticeElement> TrackedRetVals;
379
380 /// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions
381 /// that return multiple values.
382 MapVector<std::pair<Function *, unsigned>, ValueLatticeElement>
383 TrackedMultipleRetVals;
384
385 /// The set of values whose lattice has been invalidated.
386 /// Populated by resetLatticeValueFor(), cleared after resolving undefs.
387 DenseSet<Value *> Invalidated;
388
389 /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is
390 /// represented here for efficient lookup.
391 SmallPtrSet<Function *, 16> MRVFunctionsTracked;
392
393 /// A list of functions whose return cannot be modified.
394 SmallPtrSet<Function *, 16> MustPreserveReturnsInFunctions;
395
  /// TrackingIncomingArguments - This is the set of functions whose arguments
  /// we make optimistic assumptions about and try to prove as constants.
399 SmallPtrSet<Function *, 16> TrackingIncomingArguments;
400
401 /// The reason for two worklists is that overdefined is the lowest state
402 /// on the lattice, and moving things to overdefined as fast as possible
403 /// makes SCCP converge much faster.
404 ///
405 /// By having a separate worklist, we accomplish this because everything
406 /// possibly overdefined will become overdefined at the soonest possible
407 /// point.
408 SmallVector<Value *, 64> OverdefinedInstWorkList;
409 SmallVector<Value *, 64> InstWorkList;
410
411 // The BasicBlock work list
412 SmallVector<BasicBlock *, 64> BBWorkList;
413
414 /// KnownFeasibleEdges - Entries in this set are edges which have already had
415 /// PHI nodes retriggered.
416 using Edge = std::pair<BasicBlock *, BasicBlock *>;
417 DenseSet<Edge> KnownFeasibleEdges;
418
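  /// PredicateInfo for each function we are solving, registered via
  /// addPredicateInfo(). It is consulted when handling ssa.copy intrinsics to
  /// refine lattice values using dominating branch conditions and assumes.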
419 DenseMap<Function *, std::unique_ptr<PredicateInfo>> FnPredicateInfo;
420
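  /// Users that do not appear in a value's use list but whose lattice state
  /// still depends on it (e.g. extracts of a with.overflow result depend on
  /// the intrinsic's operands). They are re-visited via markUsersAsChanged()
  /// when the value's state changes.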
421 DenseMap<Value *, SmallPtrSet<User *, 2>> AdditionalUsers;
422
423 LLVMContext &Ctx;
424
425 private:
  ConstantInt *getConstantInt(const ValueLatticeElement &IV, Type *Ty) const {
427 return dyn_cast_or_null<ConstantInt>(getConstant(IV, Ty));
428 }
429
430 // pushToWorkList - Helper for markConstant/markOverdefined
431 void pushToWorkList(ValueLatticeElement &IV, Value *V);
432
433 // Helper to push \p V to the worklist, after updating it to \p IV. Also
434 // prints a debug message with the updated value.
435 void pushToWorkListMsg(ValueLatticeElement &IV, Value *V);
436
437 // markConstant - Make a value be marked as "constant". If the value
438 // is not already a constant, add it to the instruction work list so that
439 // the users of the instruction are updated later.
440 bool markConstant(ValueLatticeElement &IV, Value *V, Constant *C,
441 bool MayIncludeUndef = false);
442
  bool markConstant(Value *V, Constant *C) {
444 assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
445 return markConstant(ValueState[V], V, C);
446 }
447
448 /// markConstantRange - Mark the object as constant range with \p CR. If the
449 /// object is not a constant range with the range \p CR, add it to the
450 /// instruction work list so that the users of the instruction are updated
451 /// later.
452 bool markConstantRange(ValueLatticeElement &IV, Value *V,
453 const ConstantRange &CR);
454
455 // markOverdefined - Make a value be marked as "overdefined". If the
456 // value is not already overdefined, add it to the overdefined instruction
457 // work list so that the users of the instruction are updated later.
458 bool markOverdefined(ValueLatticeElement &IV, Value *V);
459
460 /// Merge \p MergeWithV into \p IV and push \p V to the worklist, if \p IV
461 /// changes.
462 bool mergeInValue(ValueLatticeElement &IV, Value *V,
463 ValueLatticeElement MergeWithV,
464 ValueLatticeElement::MergeOptions Opts = {
465 /*MayIncludeUndef=*/false, /*CheckWiden=*/false});
466
  bool mergeInValue(Value *V, ValueLatticeElement MergeWithV,
468 ValueLatticeElement::MergeOptions Opts = {
469 /*MayIncludeUndef=*/false, /*CheckWiden=*/false}) {
470 assert(!V->getType()->isStructTy() &&
471 "non-structs should use markConstant");
472 return mergeInValue(ValueState[V], V, MergeWithV, Opts);
473 }
474
475 /// getValueState - Return the ValueLatticeElement object that corresponds to
476 /// the value. This function handles the case when the value hasn't been seen
477 /// yet by properly seeding constants etc.
  ValueLatticeElement &getValueState(Value *V) {
479 assert(!V->getType()->isStructTy() && "Should use getStructValueState");
480
481 auto I = ValueState.insert(std::make_pair(V, ValueLatticeElement()));
482 ValueLatticeElement &LV = I.first->second;
483
484 if (!I.second)
485 return LV; // Common case, already in the map.
486
487 if (auto *C = dyn_cast<Constant>(V))
488 LV.markConstant(C); // Constants are constant
489
490 // All others are unknown by default.
491 return LV;
492 }
493
494 /// getStructValueState - Return the ValueLatticeElement object that
495 /// corresponds to the value/field pair. This function handles the case when
496 /// the value hasn't been seen yet by properly seeding constants etc.
  ValueLatticeElement &getStructValueState(Value *V, unsigned i) {
498 assert(V->getType()->isStructTy() && "Should use getValueState");
499 assert(i < cast<StructType>(V->getType())->getNumElements() &&
500 "Invalid element #");
501
502 auto I = StructValueState.insert(
503 std::make_pair(std::make_pair(V, i), ValueLatticeElement()));
504 ValueLatticeElement &LV = I.first->second;
505
506 if (!I.second)
507 return LV; // Common case, already in the map.
508
509 if (auto *C = dyn_cast<Constant>(V)) {
510 Constant *Elt = C->getAggregateElement(i);
511
512 if (!Elt)
513 LV.markOverdefined(); // Unknown sort of constant.
514 else
515 LV.markConstant(Elt); // Constants are constant.
516 }
517
    // All others are unknown by default.
519 return LV;
520 }
521
522 /// Traverse the use-def chain of \p Call, marking itself and its users as
523 /// "unknown" on the way.
  void invalidate(CallBase *Call) {
525 SmallVector<Instruction *, 64> ToInvalidate;
526 ToInvalidate.push_back(Call);
527
528 while (!ToInvalidate.empty()) {
529 Instruction *Inst = ToInvalidate.pop_back_val();
530
531 if (!Invalidated.insert(Inst).second)
532 continue;
533
534 if (!BBExecutable.count(Inst->getParent()))
535 continue;
536
537 Value *V = nullptr;
538 // For return instructions we need to invalidate the tracked returns map.
539 // Anything else has its lattice in the value map.
540 if (auto *RetInst = dyn_cast<ReturnInst>(Inst)) {
541 Function *F = RetInst->getParent()->getParent();
542 if (auto It = TrackedRetVals.find(F); It != TrackedRetVals.end()) {
543 It->second = ValueLatticeElement();
544 V = F;
545 } else if (MRVFunctionsTracked.count(F)) {
546 auto *STy = cast<StructType>(F->getReturnType());
547 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
548 TrackedMultipleRetVals[{F, I}] = ValueLatticeElement();
549 V = F;
550 }
551 } else if (auto *STy = dyn_cast<StructType>(Inst->getType())) {
552 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
553 if (auto It = StructValueState.find({Inst, I});
554 It != StructValueState.end()) {
555 It->second = ValueLatticeElement();
556 V = Inst;
557 }
558 }
559 } else if (auto It = ValueState.find(Inst); It != ValueState.end()) {
560 It->second = ValueLatticeElement();
561 V = Inst;
562 }
563
564 if (V) {
565 LLVM_DEBUG(dbgs() << "Invalidated lattice for " << *V << "\n");
566
567 for (User *U : V->users())
568 if (auto *UI = dyn_cast<Instruction>(U))
569 ToInvalidate.push_back(UI);
570
571 auto It = AdditionalUsers.find(V);
572 if (It != AdditionalUsers.end())
573 for (User *U : It->second)
574 if (auto *UI = dyn_cast<Instruction>(U))
575 ToInvalidate.push_back(UI);
576 }
577 }
578 }
579
580 /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
581 /// work list if it is not already executable.
582 bool markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest);
583
584 // getFeasibleSuccessors - Return a vector of booleans to indicate which
585 // successors are reachable from a given terminator instruction.
586 void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs);
587
588 // OperandChangedState - This method is invoked on all of the users of an
589 // instruction that was just changed state somehow. Based on this
590 // information, we need to update the specified user of this instruction.
  void operandChangedState(Instruction *I) {
592 if (BBExecutable.count(I->getParent())) // Inst is executable?
593 visit(*I);
594 }
595
596 // Add U as additional user of V.
  void addAdditionalUser(Value *V, User *U) {
598 auto Iter = AdditionalUsers.insert({V, {}});
599 Iter.first->second.insert(U);
600 }
601
602 // Mark I's users as changed, including AdditionalUsers.
  void markUsersAsChanged(Value *I) {
604 // Functions include their arguments in the use-list. Changed function
605 // values mean that the result of the function changed. We only need to
606 // update the call sites with the new function result and do not have to
607 // propagate the call arguments.
608 if (isa<Function>(I)) {
609 for (User *U : I->users()) {
610 if (auto *CB = dyn_cast<CallBase>(U))
611 handleCallResult(*CB);
612 }
613 } else {
614 for (User *U : I->users())
615 if (auto *UI = dyn_cast<Instruction>(U))
616 operandChangedState(UI);
617 }
618
619 auto Iter = AdditionalUsers.find(I);
620 if (Iter != AdditionalUsers.end()) {
621 // Copy additional users before notifying them of changes, because new
622 // users may be added, potentially invalidating the iterator.
623 SmallVector<Instruction *, 2> ToNotify;
624 for (User *U : Iter->second)
625 if (auto *UI = dyn_cast<Instruction>(U))
626 ToNotify.push_back(UI);
627 for (Instruction *UI : ToNotify)
628 operandChangedState(UI);
629 }
630 }
631 void handleCallOverdefined(CallBase &CB);
632 void handleCallResult(CallBase &CB);
633 void handleCallArguments(CallBase &CB);
634 void handleExtractOfWithOverflow(ExtractValueInst &EVI,
635 const WithOverflowInst *WO, unsigned Idx);
636
637 private:
638 friend class InstVisitor<SCCPInstVisitor>;
639
640 // visit implementations - Something changed in this instruction. Either an
641 // operand made a transition, or the instruction is newly executable. Change
642 // the value type of I to reflect these changes if appropriate.
643 void visitPHINode(PHINode &I);
644
645 // Terminators
646
647 void visitReturnInst(ReturnInst &I);
648 void visitTerminator(Instruction &TI);
649
650 void visitCastInst(CastInst &I);
651 void visitSelectInst(SelectInst &I);
652 void visitUnaryOperator(Instruction &I);
653 void visitFreezeInst(FreezeInst &I);
654 void visitBinaryOperator(Instruction &I);
655 void visitCmpInst(CmpInst &I);
656 void visitExtractValueInst(ExtractValueInst &EVI);
657 void visitInsertValueInst(InsertValueInst &IVI);
658
  void visitCatchSwitchInst(CatchSwitchInst &CPI) {
660 markOverdefined(&CPI);
661 visitTerminator(CPI);
662 }
663
664 // Instructions that cannot be folded away.
665
666 void visitStoreInst(StoreInst &I);
667 void visitLoadInst(LoadInst &I);
668 void visitGetElementPtrInst(GetElementPtrInst &I);
669
  void visitInvokeInst(InvokeInst &II) {
671 visitCallBase(II);
672 visitTerminator(II);
673 }
674
  void visitCallBrInst(CallBrInst &CBI) {
676 visitCallBase(CBI);
677 visitTerminator(CBI);
678 }
679
680 void visitCallBase(CallBase &CB);
  void visitResumeInst(ResumeInst &I) { /*returns void*/
682 }
  void visitUnreachableInst(UnreachableInst &I) { /*returns void*/
684 }
  void visitFenceInst(FenceInst &I) { /*returns void*/
686 }
687
688 void visitInstruction(Instruction &I);
689
690 public:
  void addPredicateInfo(Function &F, DominatorTree &DT, AssumptionCache &AC) {
692 FnPredicateInfo.insert({&F, std::make_unique<PredicateInfo>(F, DT, AC)});
693 }
694
  void visitCallInst(CallInst &I) { visitCallBase(I); }
696
697 bool markBlockExecutable(BasicBlock *BB);
698
  const PredicateBase *getPredicateInfoFor(Instruction *I) {
700 auto It = FnPredicateInfo.find(I->getParent()->getParent());
701 if (It == FnPredicateInfo.end())
702 return nullptr;
703 return It->second->getPredicateInfoFor(I);
704 }
705
  SCCPInstVisitor(const DataLayout &DL,
707 std::function<const TargetLibraryInfo &(Function &)> GetTLI,
708 LLVMContext &Ctx)
709 : DL(DL), GetTLI(GetTLI), Ctx(Ctx) {}
710
  void trackValueOfGlobalVariable(GlobalVariable *GV) {
712 // We only track the contents of scalar globals.
713 if (GV->getValueType()->isSingleValueType()) {
714 ValueLatticeElement &IV = TrackedGlobals[GV];
715 IV.markConstant(GV->getInitializer());
716 }
717 }
718
  void addTrackedFunction(Function *F) {
720 // Add an entry, F -> undef.
721 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
722 MRVFunctionsTracked.insert(F);
723 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
724 TrackedMultipleRetVals.insert(
725 std::make_pair(std::make_pair(F, i), ValueLatticeElement()));
726 } else if (!F->getReturnType()->isVoidTy())
727 TrackedRetVals.insert(std::make_pair(F, ValueLatticeElement()));
728 }
729
  void addToMustPreserveReturnsInFunctions(Function *F) {
731 MustPreserveReturnsInFunctions.insert(F);
732 }
733
  bool mustPreserveReturn(Function *F) {
735 return MustPreserveReturnsInFunctions.count(F);
736 }
737
  void addArgumentTrackedFunction(Function *F) {
739 TrackingIncomingArguments.insert(F);
740 }
741
  bool isArgumentTrackedFunction(Function *F) {
743 return TrackingIncomingArguments.count(F);
744 }
745
746 void solve();
747
748 bool resolvedUndef(Instruction &I);
749
750 bool resolvedUndefsIn(Function &F);
751
  bool isBlockExecutable(BasicBlock *BB) const {
753 return BBExecutable.count(BB);
754 }
755
756 bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const;
757
  std::vector<ValueLatticeElement> getStructLatticeValueFor(Value *V) const {
759 std::vector<ValueLatticeElement> StructValues;
760 auto *STy = dyn_cast<StructType>(V->getType());
761 assert(STy && "getStructLatticeValueFor() can be called only on structs");
762 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
763 auto I = StructValueState.find(std::make_pair(V, i));
764 assert(I != StructValueState.end() && "Value not in valuemap!");
765 StructValues.push_back(I->second);
766 }
767 return StructValues;
768 }
769
  void removeLatticeValueFor(Value *V) { ValueState.erase(V); }
771
772 /// Invalidate the Lattice Value of \p Call and its users after specializing
773 /// the call. Then recompute it.
  void resetLatticeValueFor(CallBase *Call) {
775 // Calls to void returning functions do not need invalidation.
776 Function *F = Call->getCalledFunction();
777 (void)F;
778 assert(!F->getReturnType()->isVoidTy() &&
779 (TrackedRetVals.count(F) || MRVFunctionsTracked.count(F)) &&
780 "All non void specializations should be tracked");
781 invalidate(Call);
782 handleCallResult(*Call);
783 }
784
  const ValueLatticeElement &getLatticeValueFor(Value *V) const {
786 assert(!V->getType()->isStructTy() &&
787 "Should use getStructLatticeValueFor");
788 DenseMap<Value *, ValueLatticeElement>::const_iterator I =
789 ValueState.find(V);
790 assert(I != ValueState.end() &&
791 "V not found in ValueState nor Paramstate map!");
792 return I->second;
793 }
794
  const MapVector<Function *, ValueLatticeElement> &getTrackedRetVals() {
796 return TrackedRetVals;
797 }
798
  const DenseMap<GlobalVariable *, ValueLatticeElement> &getTrackedGlobals() {
800 return TrackedGlobals;
801 }
802
  const SmallPtrSet<Function *, 16> getMRVFunctionsTracked() {
804 return MRVFunctionsTracked;
805 }
806
  void markOverdefined(Value *V) {
808 if (auto *STy = dyn_cast<StructType>(V->getType()))
809 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
810 markOverdefined(getStructValueState(V, i), V);
811 else
812 markOverdefined(ValueState[V], V);
813 }
814
  void trackValueOfArgument(Argument *A) {
816 if (A->getType()->isIntOrIntVectorTy()) {
817 if (std::optional<ConstantRange> Range = A->getRange()) {
818 markConstantRange(ValueState[A], A, *Range);
819 return;
820 }
821 }
822 // Assume nothing about the incoming arguments without range.
823 markOverdefined(A);
824 }
825
826 bool isStructLatticeConstant(Function *F, StructType *STy);
827
828 Constant *getConstant(const ValueLatticeElement &LV, Type *Ty) const;
829
830 Constant *getConstantOrNull(Value *V) const;
831
  SmallPtrSetImpl<Function *> &getArgumentTrackedFunctions() {
833 return TrackingIncomingArguments;
834 }
835
836 void setLatticeValueForSpecializationArguments(Function *F,
837 const SmallVectorImpl<ArgInfo> &Args);
838
  void markFunctionUnreachable(Function *F) {
840 for (auto &BB : *F)
841 BBExecutable.erase(&BB);
842 }
843
  void solveWhileResolvedUndefsIn(Module &M) {
845 bool ResolvedUndefs = true;
846 while (ResolvedUndefs) {
847 solve();
848 ResolvedUndefs = false;
849 for (Function &F : M)
850 ResolvedUndefs |= resolvedUndefsIn(F);
851 }
852 }
853
  void solveWhileResolvedUndefsIn(SmallVectorImpl<Function *> &WorkList) {
855 bool ResolvedUndefs = true;
856 while (ResolvedUndefs) {
857 solve();
858 ResolvedUndefs = false;
859 for (Function *F : WorkList)
860 ResolvedUndefs |= resolvedUndefsIn(*F);
861 }
862 }
863
  void solveWhileResolvedUndefs() {
865 bool ResolvedUndefs = true;
866 while (ResolvedUndefs) {
867 solve();
868 ResolvedUndefs = false;
869 for (Value *V : Invalidated)
870 if (auto *I = dyn_cast<Instruction>(V))
871 ResolvedUndefs |= resolvedUndef(*I);
872 }
873 Invalidated.clear();
874 }
875 };
876
877 } // namespace llvm
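// Illustrative sketch of how a client typically drives the solver; it mirrors
// solveWhileResolvedUndefsIn() above (the real pass wiring lives in the
// SCCP/IPSCCP passes, not in this file):
//
//   SCCPSolver Solver(DL, GetTLI, Ctx);
//   for (Function &F : M)
//     if (!F.isDeclaration())
//       Solver.markBlockExecutable(&F.front());
//   bool ResolvedUndefs = true;
//   while (ResolvedUndefs) {
//     Solver.solve();
//     ResolvedUndefs = false;
//     for (Function &F : M)
//       ResolvedUndefs |= Solver.resolvedUndefsIn(F);
//   }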
878
bool SCCPInstVisitor::markBlockExecutable(BasicBlock *BB) {
880 if (!BBExecutable.insert(BB).second)
881 return false;
882 LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
883 BBWorkList.push_back(BB); // Add the block to the work list!
884 return true;
885 }
886
void SCCPInstVisitor::pushToWorkList(ValueLatticeElement &IV, Value *V) {
888 if (IV.isOverdefined()) {
889 if (OverdefinedInstWorkList.empty() || OverdefinedInstWorkList.back() != V)
890 OverdefinedInstWorkList.push_back(V);
891 return;
892 }
893 if (InstWorkList.empty() || InstWorkList.back() != V)
894 InstWorkList.push_back(V);
895 }
896
void SCCPInstVisitor::pushToWorkListMsg(ValueLatticeElement &IV, Value *V) {
898 LLVM_DEBUG(dbgs() << "updated " << IV << ": " << *V << '\n');
899 pushToWorkList(IV, V);
900 }
901
bool SCCPInstVisitor::markConstant(ValueLatticeElement &IV, Value *V,
903 Constant *C, bool MayIncludeUndef) {
904 if (!IV.markConstant(C, MayIncludeUndef))
905 return false;
906 LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
907 pushToWorkList(IV, V);
908 return true;
909 }
910
bool SCCPInstVisitor::markConstantRange(ValueLatticeElement &IV, Value *V,
912 const ConstantRange &CR) {
913 if (!IV.markConstantRange(CR))
914 return false;
915 LLVM_DEBUG(dbgs() << "markConstantRange: " << CR << ": " << *V << '\n');
916 pushToWorkList(IV, V);
917 return true;
918 }
919
bool SCCPInstVisitor::markOverdefined(ValueLatticeElement &IV, Value *V) {
921 if (!IV.markOverdefined())
922 return false;
923
924 LLVM_DEBUG(dbgs() << "markOverdefined: ";
925 if (auto *F = dyn_cast<Function>(V)) dbgs()
926 << "Function '" << F->getName() << "'\n";
927 else dbgs() << *V << '\n');
928 // Only instructions go on the work list
929 pushToWorkList(IV, V);
930 return true;
931 }
932
bool SCCPInstVisitor::isStructLatticeConstant(Function *F, StructType *STy) {
934 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
935 const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i));
936 assert(It != TrackedMultipleRetVals.end());
937 ValueLatticeElement LV = It->second;
938 if (!SCCPSolver::isConstant(LV))
939 return false;
940 }
941 return true;
942 }
943
Constant *SCCPInstVisitor::getConstant(const ValueLatticeElement &LV,
945 Type *Ty) const {
946 if (LV.isConstant()) {
947 Constant *C = LV.getConstant();
948 assert(C->getType() == Ty && "Type mismatch");
949 return C;
950 }
951
952 if (LV.isConstantRange()) {
953 const auto &CR = LV.getConstantRange();
954 if (CR.getSingleElement())
955 return ConstantInt::get(Ty, *CR.getSingleElement());
956 }
957 return nullptr;
958 }
959
Constant *SCCPInstVisitor::getConstantOrNull(Value *V) const {
961 Constant *Const = nullptr;
962 if (V->getType()->isStructTy()) {
963 std::vector<ValueLatticeElement> LVs = getStructLatticeValueFor(V);
964 if (any_of(LVs, SCCPSolver::isOverdefined))
965 return nullptr;
966 std::vector<Constant *> ConstVals;
967 auto *ST = cast<StructType>(V->getType());
968 for (unsigned I = 0, E = ST->getNumElements(); I != E; ++I) {
969 ValueLatticeElement LV = LVs[I];
970 ConstVals.push_back(SCCPSolver::isConstant(LV)
971 ? getConstant(LV, ST->getElementType(I))
972 : UndefValue::get(ST->getElementType(I)));
973 }
974 Const = ConstantStruct::get(ST, ConstVals);
975 } else {
976 const ValueLatticeElement &LV = getLatticeValueFor(V);
977 if (SCCPSolver::isOverdefined(LV))
978 return nullptr;
979 Const = SCCPSolver::isConstant(LV) ? getConstant(LV, V->getType())
980 : UndefValue::get(V->getType());
981 }
982 assert(Const && "Constant is nullptr here!");
983 return Const;
984 }
985
void SCCPInstVisitor::setLatticeValueForSpecializationArguments(Function *F,
987 const SmallVectorImpl<ArgInfo> &Args) {
988 assert(!Args.empty() && "Specialization without arguments");
989 assert(F->arg_size() == Args[0].Formal->getParent()->arg_size() &&
990 "Functions should have the same number of arguments");
991
992 auto Iter = Args.begin();
993 Function::arg_iterator NewArg = F->arg_begin();
994 Function::arg_iterator OldArg = Args[0].Formal->getParent()->arg_begin();
995 for (auto End = F->arg_end(); NewArg != End; ++NewArg, ++OldArg) {
996
997 LLVM_DEBUG(dbgs() << "SCCP: Marking argument "
998 << NewArg->getNameOrAsOperand() << "\n");
999
1000 // Mark the argument constants in the new function
1001 // or copy the lattice state over from the old function.
1002 if (Iter != Args.end() && Iter->Formal == &*OldArg) {
1003 if (auto *STy = dyn_cast<StructType>(NewArg->getType())) {
1004 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
1005 ValueLatticeElement &NewValue = StructValueState[{&*NewArg, I}];
1006 NewValue.markConstant(Iter->Actual->getAggregateElement(I));
1007 }
1008 } else {
1009 ValueState[&*NewArg].markConstant(Iter->Actual);
1010 }
1011 ++Iter;
1012 } else {
1013 if (auto *STy = dyn_cast<StructType>(NewArg->getType())) {
1014 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
1015 ValueLatticeElement &NewValue = StructValueState[{&*NewArg, I}];
1016 NewValue = StructValueState[{&*OldArg, I}];
1017 }
1018 } else {
1019 ValueLatticeElement &NewValue = ValueState[&*NewArg];
1020 NewValue = ValueState[&*OldArg];
1021 }
1022 }
1023 }
1024 }
1025
void SCCPInstVisitor::visitInstruction(Instruction &I) {
1027 // All the instructions we don't do any special handling for just
1028 // go to overdefined.
1029 LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n');
1030 markOverdefined(&I);
1031 }
1032
bool SCCPInstVisitor::mergeInValue(ValueLatticeElement &IV, Value *V,
1034 ValueLatticeElement MergeWithV,
1035 ValueLatticeElement::MergeOptions Opts) {
1036 if (IV.mergeIn(MergeWithV, Opts)) {
1037 pushToWorkList(IV, V);
1038 LLVM_DEBUG(dbgs() << "Merged " << MergeWithV << " into " << *V << " : "
1039 << IV << "\n");
1040 return true;
1041 }
1042 return false;
1043 }
1044
bool SCCPInstVisitor::markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
1046 if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
1047 return false; // This edge is already known to be executable!
1048
1049 if (!markBlockExecutable(Dest)) {
1050 // If the destination is already executable, we just made an *edge*
1051 // feasible that wasn't before. Revisit the PHI nodes in the block
1052 // because they have potentially new operands.
1053 LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
1054 << " -> " << Dest->getName() << '\n');
1055
1056 for (PHINode &PN : Dest->phis())
1057 visitPHINode(PN);
1058 }
1059 return true;
1060 }
1061
1062 // getFeasibleSuccessors - Return a vector of booleans to indicate which
1063 // successors are reachable from a given terminator instruction.
void SCCPInstVisitor::getFeasibleSuccessors(Instruction &TI,
1065 SmallVectorImpl<bool> &Succs) {
1066 Succs.resize(TI.getNumSuccessors());
1067 if (auto *BI = dyn_cast<BranchInst>(&TI)) {
1068 if (BI->isUnconditional()) {
1069 Succs[0] = true;
1070 return;
1071 }
1072
1073 ValueLatticeElement BCValue = getValueState(BI->getCondition());
1074 ConstantInt *CI = getConstantInt(BCValue, BI->getCondition()->getType());
1075 if (!CI) {
1076 // Overdefined condition variables, and branches on unfoldable constant
1077 // conditions, mean the branch could go either way.
1078 if (!BCValue.isUnknownOrUndef())
1079 Succs[0] = Succs[1] = true;
1080 return;
1081 }
1082
1083 // Constant condition variables mean the branch can only go a single way.
1084 Succs[CI->isZero()] = true;
1085 return;
1086 }
1087
1088 // We cannot analyze special terminators, so consider all successors
1089 // executable.
1090 if (TI.isSpecialTerminator()) {
1091 Succs.assign(TI.getNumSuccessors(), true);
1092 return;
1093 }
1094
1095 if (auto *SI = dyn_cast<SwitchInst>(&TI)) {
1096 if (!SI->getNumCases()) {
1097 Succs[0] = true;
1098 return;
1099 }
1100 const ValueLatticeElement &SCValue = getValueState(SI->getCondition());
1101 if (ConstantInt *CI =
1102 getConstantInt(SCValue, SI->getCondition()->getType())) {
1103 Succs[SI->findCaseValue(CI)->getSuccessorIndex()] = true;
1104 return;
1105 }
1106
1107 // TODO: Switch on undef is UB. Stop passing false once the rest of LLVM
1108 // is ready.
1109 if (SCValue.isConstantRange(/*UndefAllowed=*/false)) {
1110 const ConstantRange &Range = SCValue.getConstantRange();
1111 unsigned ReachableCaseCount = 0;
1112 for (const auto &Case : SI->cases()) {
1113 const APInt &CaseValue = Case.getCaseValue()->getValue();
1114 if (Range.contains(CaseValue)) {
1115 Succs[Case.getSuccessorIndex()] = true;
1116 ++ReachableCaseCount;
1117 }
1118 }
1119
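      // For example (illustrative): if the condition is known to be in the
      // range [0, 3) and cases 0, 1 and 2 are all reachable, the range has no
      // more elements than reachable cases, so the default cannot be taken.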
1120 Succs[SI->case_default()->getSuccessorIndex()] =
1121 Range.isSizeLargerThan(ReachableCaseCount);
1122 return;
1123 }
1124
1125 // Overdefined or unknown condition? All destinations are executable!
1126 if (!SCValue.isUnknownOrUndef())
1127 Succs.assign(TI.getNumSuccessors(), true);
1128 return;
1129 }
1130
  // If the address of an indirect branch is a blockaddress, we only mark the
  // corresponding target block as executable.
1133 if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) {
1134 // Casts are folded by visitCastInst.
1135 ValueLatticeElement IBRValue = getValueState(IBR->getAddress());
1136 BlockAddress *Addr = dyn_cast_or_null<BlockAddress>(
1137 getConstant(IBRValue, IBR->getAddress()->getType()));
1138 if (!Addr) { // Overdefined or unknown condition?
1139 // All destinations are executable!
1140 if (!IBRValue.isUnknownOrUndef())
1141 Succs.assign(TI.getNumSuccessors(), true);
1142 return;
1143 }
1144
1145 BasicBlock *T = Addr->getBasicBlock();
1146 assert(Addr->getFunction() == T->getParent() &&
1147 "Block address of a different function ?");
1148 for (unsigned i = 0; i < IBR->getNumSuccessors(); ++i) {
1149 // This is the target.
1150 if (IBR->getDestination(i) == T) {
1151 Succs[i] = true;
1152 return;
1153 }
1154 }
1155
    // If we didn't find our destination in the IBR successor list, then we
    // have undefined behavior. It's ok to assume no successor is executable.
1158 return;
1159 }
1160
1161 LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
1162 llvm_unreachable("SCCP: Don't know how to handle this terminator!");
1163 }
1164
1165 // isEdgeFeasible - Return true if the control flow edge from the 'From' basic
1166 // block to the 'To' basic block is currently feasible.
bool SCCPInstVisitor::isEdgeFeasible(BasicBlock *From, BasicBlock *To) const {
1168 // Check if we've called markEdgeExecutable on the edge yet. (We could
1169 // be more aggressive and try to consider edges which haven't been marked
1170 // yet, but there isn't any need.)
1171 return KnownFeasibleEdges.count(Edge(From, To));
1172 }
1173
1174 // visit Implementations - Something changed in this instruction, either an
1175 // operand made a transition, or the instruction is newly executable. Change
1176 // the value type of I to reflect these changes if appropriate. This method
1177 // makes sure to do the following actions:
1178 //
1179 // 1. If a phi node merges two constants in, and has conflicting value coming
1180 // from different branches, or if the PHI node merges in an overdefined
1181 // value, then the PHI node becomes overdefined.
1182 // 2. If a phi node merges only constants in, and they all agree on value, the
1183 // PHI node becomes a constant value equal to that.
1184 // 3. If V <- x (op) y && isConstant(x) && isConstant(y) V = Constant
1185 // 4. If V <- x (op) y && (isOverdefined(x) || isOverdefined(y)) V = Overdefined
1186 // 5. If V <- MEM or V <- CALL or V <- (unknown) then V = Overdefined
1187 // 6. If a conditional branch has a value that is constant, make the selected
1188 // destination executable
1189 // 7. If a conditional branch has a value that is overdefined, make all
1190 // successors executable.
void SCCPInstVisitor::visitPHINode(PHINode &PN) {
1192 // If this PN returns a struct, just mark the result overdefined.
1193 // TODO: We could do a lot better than this if code actually uses this.
1194 if (PN.getType()->isStructTy())
1195 return (void)markOverdefined(&PN);
1196
1197 if (getValueState(&PN).isOverdefined())
1198 return; // Quick exit
1199
1200 // Super-extra-high-degree PHI nodes are unlikely to ever be marked constant,
1201 // and slow us down a lot. Just mark them overdefined.
1202 if (PN.getNumIncomingValues() > 64)
1203 return (void)markOverdefined(&PN);
1204
1205 unsigned NumActiveIncoming = 0;
1206
1207 // Look at all of the executable operands of the PHI node. If any of them
1208 // are overdefined, the PHI becomes overdefined as well. If they are all
1209 // constant, and they agree with each other, the PHI becomes the identical
1210 // constant. If they are constant and don't agree, the PHI is a constant
1211 // range. If there are no executable operands, the PHI remains unknown.
1212 ValueLatticeElement PhiState = getValueState(&PN);
1213 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
1214 if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
1215 continue;
1216
1217 ValueLatticeElement IV = getValueState(PN.getIncomingValue(i));
1218 PhiState.mergeIn(IV);
1219 NumActiveIncoming++;
1220 if (PhiState.isOverdefined())
1221 break;
1222 }
1223
1224 // We allow up to 1 range extension per active incoming value and one
1225 // additional extension. Note that we manually adjust the number of range
1226 // extensions to match the number of active incoming values. This helps to
1227 // limit multiple extensions caused by the same incoming value, if other
1228 // incoming values are equal.
1229 mergeInValue(&PN, PhiState,
1230 ValueLatticeElement::MergeOptions().setMaxWidenSteps(
1231 NumActiveIncoming + 1));
1232 ValueLatticeElement &PhiStateRef = getValueState(&PN);
1233 PhiStateRef.setNumRangeExtensions(
1234 std::max(NumActiveIncoming, PhiStateRef.getNumRangeExtensions()));
1235 }
1236
void SCCPInstVisitor::visitReturnInst(ReturnInst &I) {
1238 if (I.getNumOperands() == 0)
1239 return; // ret void
1240
1241 Function *F = I.getParent()->getParent();
1242 Value *ResultOp = I.getOperand(0);
1243
1244 // If we are tracking the return value of this function, merge it in.
1245 if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) {
1246 auto TFRVI = TrackedRetVals.find(F);
1247 if (TFRVI != TrackedRetVals.end()) {
1248 mergeInValue(TFRVI->second, F, getValueState(ResultOp));
1249 return;
1250 }
1251 }
1252
1253 // Handle functions that return multiple values.
1254 if (!TrackedMultipleRetVals.empty()) {
1255 if (auto *STy = dyn_cast<StructType>(ResultOp->getType()))
1256 if (MRVFunctionsTracked.count(F))
1257 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1258 mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
1259 getStructValueState(ResultOp, i));
1260 }
1261 }
1262
void SCCPInstVisitor::visitTerminator(Instruction &TI) {
1264 SmallVector<bool, 16> SuccFeasible;
1265 getFeasibleSuccessors(TI, SuccFeasible);
1266
1267 BasicBlock *BB = TI.getParent();
1268
1269 // Mark all feasible successors executable.
1270 for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
1271 if (SuccFeasible[i])
1272 markEdgeExecutable(BB, TI.getSuccessor(i));
1273 }
1274
void SCCPInstVisitor::visitCastInst(CastInst &I) {
1276 // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1277 // discover a concrete value later.
1278 if (ValueState[&I].isOverdefined())
1279 return;
1280
1281 ValueLatticeElement OpSt = getValueState(I.getOperand(0));
1282 if (OpSt.isUnknownOrUndef())
1283 return;
1284
1285 if (Constant *OpC = getConstant(OpSt, I.getOperand(0)->getType())) {
1286 // Fold the constant as we build.
1287 if (Constant *C =
1288 ConstantFoldCastOperand(I.getOpcode(), OpC, I.getType(), DL))
1289 return (void)markConstant(&I, C);
1290 }
1291
1292 // Ignore bitcasts, as they may change the number of vector elements.
1293 if (I.getDestTy()->isIntOrIntVectorTy() &&
1294 I.getSrcTy()->isIntOrIntVectorTy() &&
1295 I.getOpcode() != Instruction::BitCast) {
1296 auto &LV = getValueState(&I);
1297 ConstantRange OpRange =
1298 OpSt.asConstantRange(I.getSrcTy(), /*UndefAllowed=*/false);
1299
1300 Type *DestTy = I.getDestTy();
1301 ConstantRange Res =
1302 OpRange.castOp(I.getOpcode(), DestTy->getScalarSizeInBits());
1303 mergeInValue(LV, &I, ValueLatticeElement::getRange(Res));
1304 } else
1305 markOverdefined(&I);
1306 }
1307
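/// Handle an extractvalue of a with.overflow intrinsic result. Index 0 is the
/// arithmetic result, computed as a range binary op over the operand ranges;
/// index 1 is the overflow bit, which is known false when the LHS range lies
/// entirely within the guaranteed no-wrap region for the RHS range.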
void SCCPInstVisitor::handleExtractOfWithOverflow(ExtractValueInst &EVI,
1309 const WithOverflowInst *WO,
1310 unsigned Idx) {
1311 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
1312 ValueLatticeElement L = getValueState(LHS);
1313 ValueLatticeElement R = getValueState(RHS);
1314 addAdditionalUser(LHS, &EVI);
1315 addAdditionalUser(RHS, &EVI);
1316 if (L.isUnknownOrUndef() || R.isUnknownOrUndef())
1317 return; // Wait to resolve.
1318
1319 Type *Ty = LHS->getType();
1320 ConstantRange LR = L.asConstantRange(Ty, /*UndefAllowed=*/false);
1321 ConstantRange RR = R.asConstantRange(Ty, /*UndefAllowed=*/false);
1322 if (Idx == 0) {
1323 ConstantRange Res = LR.binaryOp(WO->getBinaryOp(), RR);
1324 mergeInValue(&EVI, ValueLatticeElement::getRange(Res));
1325 } else {
1326 assert(Idx == 1 && "Index can only be 0 or 1");
1327 ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
1328 WO->getBinaryOp(), RR, WO->getNoWrapKind());
1329 if (NWRegion.contains(LR))
1330 return (void)markConstant(&EVI, ConstantInt::getFalse(EVI.getType()));
1331 markOverdefined(&EVI);
1332 }
1333 }
1334
void SCCPInstVisitor::visitExtractValueInst(ExtractValueInst &EVI) {
  // If this returns a struct, mark all elements overdefined; we don't track
  // structs in structs.
1338 if (EVI.getType()->isStructTy())
1339 return (void)markOverdefined(&EVI);
1340
1341 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1342 // discover a concrete value later.
1343 if (ValueState[&EVI].isOverdefined())
1344 return (void)markOverdefined(&EVI);
1345
1346 // If this is extracting from more than one level of struct, we don't know.
1347 if (EVI.getNumIndices() != 1)
1348 return (void)markOverdefined(&EVI);
1349
1350 Value *AggVal = EVI.getAggregateOperand();
1351 if (AggVal->getType()->isStructTy()) {
1352 unsigned i = *EVI.idx_begin();
1353 if (auto *WO = dyn_cast<WithOverflowInst>(AggVal))
1354 return handleExtractOfWithOverflow(EVI, WO, i);
1355 ValueLatticeElement EltVal = getStructValueState(AggVal, i);
1356 mergeInValue(getValueState(&EVI), &EVI, EltVal);
1357 } else {
1358 // Otherwise, must be extracting from an array.
1359 return (void)markOverdefined(&EVI);
1360 }
1361 }
1362
void SCCPInstVisitor::visitInsertValueInst(InsertValueInst &IVI) {
1364 auto *STy = dyn_cast<StructType>(IVI.getType());
1365 if (!STy)
1366 return (void)markOverdefined(&IVI);
1367
1368 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1369 // discover a concrete value later.
1370 if (SCCPSolver::isOverdefined(ValueState[&IVI]))
1371 return (void)markOverdefined(&IVI);
1372
1373 // If this has more than one index, we can't handle it, drive all results to
1374 // undef.
1375 if (IVI.getNumIndices() != 1)
1376 return (void)markOverdefined(&IVI);
1377
1378 Value *Aggr = IVI.getAggregateOperand();
1379 unsigned Idx = *IVI.idx_begin();
1380
1381 // Compute the result based on what we're inserting.
1382 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1383 // This passes through all values that aren't the inserted element.
1384 if (i != Idx) {
1385 ValueLatticeElement EltVal = getStructValueState(Aggr, i);
1386 mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
1387 continue;
1388 }
1389
1390 Value *Val = IVI.getInsertedValueOperand();
1391 if (Val->getType()->isStructTy())
1392 // We don't track structs in structs.
1393 markOverdefined(getStructValueState(&IVI, i), &IVI);
1394 else {
1395 ValueLatticeElement InVal = getValueState(Val);
1396 mergeInValue(getStructValueState(&IVI, i), &IVI, InVal);
1397 }
1398 }
1399 }
1400
void SCCPInstVisitor::visitSelectInst(SelectInst &I) {
1402 // If this select returns a struct, just mark the result overdefined.
1403 // TODO: We could do a lot better than this if code actually uses this.
1404 if (I.getType()->isStructTy())
1405 return (void)markOverdefined(&I);
1406
1407 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1408 // discover a concrete value later.
1409 if (ValueState[&I].isOverdefined())
1410 return (void)markOverdefined(&I);
1411
1412 ValueLatticeElement CondValue = getValueState(I.getCondition());
1413 if (CondValue.isUnknownOrUndef())
1414 return;
1415
1416 if (ConstantInt *CondCB =
1417 getConstantInt(CondValue, I.getCondition()->getType())) {
1418 Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
1419 mergeInValue(&I, getValueState(OpVal));
1420 return;
1421 }
1422
1423 // Otherwise, the condition is overdefined or a constant we can't evaluate.
1424 // See if we can produce something better than overdefined based on the T/F
1425 // value.
1426 ValueLatticeElement TVal = getValueState(I.getTrueValue());
1427 ValueLatticeElement FVal = getValueState(I.getFalseValue());
1428
1429 bool Changed = ValueState[&I].mergeIn(TVal);
1430 Changed |= ValueState[&I].mergeIn(FVal);
1431 if (Changed)
1432 pushToWorkListMsg(ValueState[&I], &I);
1433 }
1434
1435 // Handle Unary Operators.
void SCCPInstVisitor::visitUnaryOperator(Instruction &I) {
1437 ValueLatticeElement V0State = getValueState(I.getOperand(0));
1438
1439 ValueLatticeElement &IV = ValueState[&I];
1440 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1441 // discover a concrete value later.
1442 if (SCCPSolver::isOverdefined(IV))
1443 return (void)markOverdefined(&I);
1444
1445 // If something is unknown/undef, wait for it to resolve.
1446 if (V0State.isUnknownOrUndef())
1447 return;
1448
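  // For example (illustrative): "fneg double 2.0" folds to the constant -2.0
  // via ConstantFoldUnaryOpOperand below.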
1449 if (SCCPSolver::isConstant(V0State))
1450 if (Constant *C = ConstantFoldUnaryOpOperand(
1451 I.getOpcode(), getConstant(V0State, I.getType()), DL))
1452 return (void)markConstant(IV, &I, C);
1453
1454 markOverdefined(&I);
1455 }
1456
1457 void SCCPInstVisitor::visitFreezeInst(FreezeInst &I) {
1458 // If this freeze returns a struct, just mark the result overdefined.
1459 // TODO: We could do a lot better than this.
1460 if (I.getType()->isStructTy())
1461 return (void)markOverdefined(&I);
1462
1463 ValueLatticeElement V0State = getValueState(I.getOperand(0));
1464 ValueLatticeElement &IV = ValueState[&I];
1465 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1466 // discover a concrete value later.
1467 if (SCCPSolver::isOverdefined(IV))
1468 return (void)markOverdefined(&I);
1469
1470 // If something is unknown/undef, wait for it to resolve.
1471 if (V0State.isUnknownOrUndef())
1472 return;
1473
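  // For example (illustrative): "freeze i32 7" is just the constant 7, since a
  // fully defined constant is guaranteed not to be undef or poison.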
1474 if (SCCPSolver::isConstant(V0State) &&
1475 isGuaranteedNotToBeUndefOrPoison(getConstant(V0State, I.getType())))
1476 return (void)markConstant(IV, &I, getConstant(V0State, I.getType()));
1477
1478 markOverdefined(&I);
1479 }
1480
1481 // Handle Binary Operators.
1482 void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
1483 ValueLatticeElement V1State = getValueState(I.getOperand(0));
1484 ValueLatticeElement V2State = getValueState(I.getOperand(1));
1485
1486 ValueLatticeElement &IV = ValueState[&I];
1487 if (IV.isOverdefined())
1488 return;
1489
1490 // If something is undef, wait for it to resolve.
1491 if (V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef())
1492 return;
1493
1494 if (V1State.isOverdefined() && V2State.isOverdefined())
1495 return (void)markOverdefined(&I);
1496
1497 // If either of the operands is a constant, try to fold it to a constant.
1498 // TODO: Use information from notconstant better.
1499 if ((V1State.isConstant() || V2State.isConstant())) {
1500 Value *V1 = SCCPSolver::isConstant(V1State)
1501 ? getConstant(V1State, I.getOperand(0)->getType())
1502 : I.getOperand(0);
1503 Value *V2 = SCCPSolver::isConstant(V2State)
1504 ? getConstant(V2State, I.getOperand(1)->getType())
1505 : I.getOperand(1);
1506 Value *R = simplifyBinOp(I.getOpcode(), V1, V2, SimplifyQuery(DL));
1507 auto *C = dyn_cast_or_null<Constant>(R);
1508 if (C) {
1509       // Conservatively assume that the result may be based on operands that may
1510       // be undef. Note that we use mergeInValue to combine the constant with
1511       // the existing lattice value for I, as different constants might be found
1512       // after one of the operands goes to overdefined, e.g. due to one operand
1513       // being a special floating-point value.
1514 ValueLatticeElement NewV;
1515 NewV.markConstant(C, /*MayIncludeUndef=*/true);
1516 return (void)mergeInValue(&I, NewV);
1517 }
1518 }
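
  // For example (illustrative): "mul i32 %x, 0" with %x overdefined still
  // simplifies to the constant 0 above, because one operand is a constant.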
1519
1520 // Only use ranges for binary operators on integers.
1521 if (!I.getType()->isIntOrIntVectorTy())
1522 return markOverdefined(&I);
1523
1524 // Try to simplify to a constant range.
1525 ConstantRange A =
1526 V1State.asConstantRange(I.getType(), /*UndefAllowed=*/false);
1527 ConstantRange B =
1528 V2State.asConstantRange(I.getType(), /*UndefAllowed=*/false);
1529
1530 auto *BO = cast<BinaryOperator>(&I);
1531 ConstantRange R = ConstantRange::getEmpty(I.getType()->getScalarSizeInBits());
1532 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO))
1533 R = A.overflowingBinaryOp(BO->getOpcode(), B, OBO->getNoWrapKind());
1534 else
1535 R = A.binaryOp(BO->getOpcode(), B);
1536 mergeInValue(&I, ValueLatticeElement::getRange(R));
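  // For example (illustrative): for "add" on i32 operands with ranges [0, 10)
  // and [5, 7), the result above is the range [5, 16) rather than overdefined.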
1537
1538   // TODO: Currently we do not exploit special values that produce something
1539   // better than overdefined with an overdefined operand for vector or floating-
1540   // point types, e.g. "and <4 x i32> %overdefined, zeroinitializer" is zeroinitializer.
1541 }
1542
1543 // Handle compare instructions (both integer and floating-point compares).
1544 void SCCPInstVisitor::visitCmpInst(CmpInst &I) {
1545   // Do not cache this lookup; getValueState calls later in the function might
1546 // invalidate the reference.
1547 if (SCCPSolver::isOverdefined(ValueState[&I]))
1548 return (void)markOverdefined(&I);
1549
1550 Value *Op1 = I.getOperand(0);
1551 Value *Op2 = I.getOperand(1);
1552
1553   // Use the lattice states of both operands; these may carry constant-range
1554   // information (e.g. for arguments seeded from call sites).
1555 auto V1State = getValueState(Op1);
1556 auto V2State = getValueState(Op2);
1557
1558 Constant *C = V1State.getCompare(I.getPredicate(), I.getType(), V2State, DL);
1559 if (C) {
1560 ValueLatticeElement CV;
1561 CV.markConstant(C);
1562 mergeInValue(&I, CV);
1563 return;
1564 }
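
  // For example (illustrative): if the first operand has the range [0, 10),
  // "icmp ult i32 %a, 100" is folded to the constant true by getCompare above.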
1565
1566   // If the operands are still unknown, wait for them to resolve.
1567 if ((V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef()) &&
1568 !SCCPSolver::isConstant(ValueState[&I]))
1569 return;
1570
1571 markOverdefined(&I);
1572 }
1573
1574 // Handle getelementptr instructions. If all operands are constants then we
1575 // can turn this into a getelementptr ConstantExpr.
1576 void SCCPInstVisitor::visitGetElementPtrInst(GetElementPtrInst &I) {
1577 if (SCCPSolver::isOverdefined(ValueState[&I]))
1578 return (void)markOverdefined(&I);
1579
1580 SmallVector<Constant *, 8> Operands;
1581 Operands.reserve(I.getNumOperands());
1582
1583 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
1584 ValueLatticeElement State = getValueState(I.getOperand(i));
1585 if (State.isUnknownOrUndef())
1586 return; // Operands are not resolved yet.
1587
1588 if (SCCPSolver::isOverdefined(State))
1589 return (void)markOverdefined(&I);
1590
1591 if (Constant *C = getConstant(State, I.getOperand(i)->getType())) {
1592 Operands.push_back(C);
1593 continue;
1594 }
1595
1596 return (void)markOverdefined(&I);
1597 }
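
  // For example (illustrative): "getelementptr i32, ptr @g, i64 2" with all
  // operands known constant folds to a constant expression below.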
1598
1599 if (Constant *C = ConstantFoldInstOperands(&I, Operands, DL))
1600 markConstant(&I, C);
1601 }
1602
1603 void SCCPInstVisitor::visitStoreInst(StoreInst &SI) {
1604 // If this store is of a struct, ignore it.
1605 if (SI.getOperand(0)->getType()->isStructTy())
1606 return;
1607
1608 if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
1609 return;
1610
1611 GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
1612 auto I = TrackedGlobals.find(GV);
1613 if (I == TrackedGlobals.end())
1614 return;
1615
1616 // Get the value we are storing into the global, then merge it.
1617 mergeInValue(I->second, GV, getValueState(SI.getOperand(0)),
1618 ValueLatticeElement::MergeOptions().setCheckWiden(false));
1619 if (I->second.isOverdefined())
1620 TrackedGlobals.erase(I); // No need to keep tracking this!
1621 }
1622
1623 static ValueLatticeElement getValueFromMetadata(const Instruction *I) {
1624 if (I->getType()->isIntOrIntVectorTy()) {
1625 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range))
1626 return ValueLatticeElement::getRange(
1627 getConstantRangeFromMetadata(*Ranges));
1628
1629 if (const auto *CB = dyn_cast<CallBase>(I))
1630 if (std::optional<ConstantRange> Range = CB->getRange())
1631 return ValueLatticeElement::getRange(*Range);
1632 }
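  // For example (illustrative): a load annotated with "!range !{i32 0, i32 256}"
  // yields the lattice range [0, 256) above.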
1633 if (I->hasMetadata(LLVMContext::MD_nonnull))
1634 return ValueLatticeElement::getNot(
1635 ConstantPointerNull::get(cast<PointerType>(I->getType())));
1636 return ValueLatticeElement::getOverdefined();
1637 }
1638
1639 // Handle load instructions. If the operand is a constant pointer to a constant
1640 // global, we can replace the load with the loaded constant value!
1641 void SCCPInstVisitor::visitLoadInst(LoadInst &I) {
1642 // If this load is of a struct or the load is volatile, just mark the result
1643 // as overdefined.
1644 if (I.getType()->isStructTy() || I.isVolatile())
1645 return (void)markOverdefined(&I);
1646
1647 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1648 // discover a concrete value later.
1649 if (ValueState[&I].isOverdefined())
1650 return (void)markOverdefined(&I);
1651
1652 ValueLatticeElement PtrVal = getValueState(I.getOperand(0));
1653 if (PtrVal.isUnknownOrUndef())
1654 return; // The pointer is not resolved yet!
1655
1656 ValueLatticeElement &IV = ValueState[&I];
1657
1658 if (SCCPSolver::isConstant(PtrVal)) {
1659 Constant *Ptr = getConstant(PtrVal, I.getOperand(0)->getType());
1660
1661     // A load from a null pointer is undefined unless null is a defined address.
1662 if (isa<ConstantPointerNull>(Ptr)) {
1663 if (NullPointerIsDefined(I.getFunction(), I.getPointerAddressSpace()))
1664 return (void)markOverdefined(IV, &I);
1665 else
1666 return;
1667 }
1668
1669 // Transform load (constant global) into the value loaded.
1670 if (auto *GV = dyn_cast<GlobalVariable>(Ptr)) {
1671 if (!TrackedGlobals.empty()) {
1672 // If we are tracking this global, merge in the known value for it.
1673 auto It = TrackedGlobals.find(GV);
1674 if (It != TrackedGlobals.end()) {
1675 mergeInValue(IV, &I, It->second, getMaxWidenStepsOpts());
1676 return;
1677 }
1678 }
1679 }
1680
1681 // Transform load from a constant into a constant if possible.
1682 if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, I.getType(), DL))
1683 return (void)markConstant(IV, &I, C);
1684 }
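
  // For example (illustrative): a load of "@g = constant i32 7" is folded to
  // the constant 7 above.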
1685
1686 // Fall back to metadata.
1687 mergeInValue(&I, getValueFromMetadata(&I));
1688 }
1689
1690 void SCCPInstVisitor::visitCallBase(CallBase &CB) {
1691 handleCallResult(CB);
1692 handleCallArguments(CB);
1693 }
1694
1695 void SCCPInstVisitor::handleCallOverdefined(CallBase &CB) {
1696 Function *F = CB.getCalledFunction();
1697
1698   // If the call returns void, there is no result to track; just bail.
1699 if (CB.getType()->isVoidTy())
1700 return;
1701
1702 // Always mark struct return as overdefined.
1703 if (CB.getType()->isStructTy())
1704 return (void)markOverdefined(&CB);
1705
1706 // Otherwise, if we have a single return value case, and if the function is
1707 // a declaration, maybe we can constant fold it.
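  // For example (illustrative): a call to "@llvm.ctpop.i32(i32 255)" with a
  // known constant argument folds to the constant 8 below.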
1708 if (F && F->isDeclaration() && canConstantFoldCallTo(&CB, F)) {
1709 SmallVector<Constant *, 8> Operands;
1710 for (const Use &A : CB.args()) {
1711 if (A.get()->getType()->isStructTy())
1712 return markOverdefined(&CB); // Can't handle struct args.
1713 if (A.get()->getType()->isMetadataTy())
1714 continue; // Carried in CB, not allowed in Operands.
1715 ValueLatticeElement State = getValueState(A);
1716
1717 if (State.isUnknownOrUndef())
1718 return; // Operands are not resolved yet.
1719 if (SCCPSolver::isOverdefined(State))
1720 return (void)markOverdefined(&CB);
1721 assert(SCCPSolver::isConstant(State) && "Unknown state!");
1722 Operands.push_back(getConstant(State, A->getType()));
1723 }
1724
1725 if (SCCPSolver::isOverdefined(getValueState(&CB)))
1726 return (void)markOverdefined(&CB);
1727
1728 // If we can constant fold this, mark the result of the call as a
1729 // constant.
1730 if (Constant *C = ConstantFoldCall(&CB, F, Operands, &GetTLI(*F)))
1731 return (void)markConstant(&CB, C);
1732 }
1733
1734 // Fall back to metadata.
1735 mergeInValue(&CB, getValueFromMetadata(&CB));
1736 }
1737
1738 void SCCPInstVisitor::handleCallArguments(CallBase &CB) {
1739 Function *F = CB.getCalledFunction();
1740   // If this is a local function that doesn't have its address taken, mark its
1741   // entry block executable and merge the actual arguments at this call site
1742   // into the formal arguments of the function.
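  // For example (illustrative): if every known call site passes the constant 3
  // for a formal argument, that argument's lattice value stays at the constant
  // 3; a second site passing 4 widens it (to a small range for integers, or to
  // overdefined otherwise).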
1743 if (TrackingIncomingArguments.count(F)) {
1744 markBlockExecutable(&F->front());
1745
1746 // Propagate information from this call site into the callee.
1747 auto CAI = CB.arg_begin();
1748 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
1749 ++AI, ++CAI) {
1750 // If this argument is byval, and if the function is not readonly, there
1751 // will be an implicit copy formed of the input aggregate.
1752 if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
1753 markOverdefined(&*AI);
1754 continue;
1755 }
1756
1757 if (auto *STy = dyn_cast<StructType>(AI->getType())) {
1758 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1759 ValueLatticeElement CallArg = getStructValueState(*CAI, i);
1760 mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg,
1761 getMaxWidenStepsOpts());
1762 }
1763 } else
1764 mergeInValue(&*AI, getValueState(*CAI), getMaxWidenStepsOpts());
1765 }
1766 }
1767 }
1768
1769 void SCCPInstVisitor::handleCallResult(CallBase &CB) {
1770 Function *F = CB.getCalledFunction();
1771
1772 if (auto *II = dyn_cast<IntrinsicInst>(&CB)) {
1773 if (II->getIntrinsicID() == Intrinsic::ssa_copy) {
1774 if (ValueState[&CB].isOverdefined())
1775 return;
1776
1777 Value *CopyOf = CB.getOperand(0);
1778 ValueLatticeElement CopyOfVal = getValueState(CopyOf);
1779 const auto *PI = getPredicateInfoFor(&CB);
1780 assert(PI && "Missing predicate info for ssa.copy");
1781
1782 const std::optional<PredicateConstraint> &Constraint =
1783 PI->getConstraint();
1784 if (!Constraint) {
1785 mergeInValue(ValueState[&CB], &CB, CopyOfVal);
1786 return;
1787 }
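
      // For example (illustrative): for predicate info created from
      // "br i1 (icmp ult i32 %x, 10)", the ssa.copy of %x on the true edge
      // carries the constraint "ult 10", which restricts %x to [0, 10) below.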
1788
1789 CmpInst::Predicate Pred = Constraint->Predicate;
1790 Value *OtherOp = Constraint->OtherOp;
1791
1792 // Wait until OtherOp is resolved.
1793 if (getValueState(OtherOp).isUnknown()) {
1794 addAdditionalUser(OtherOp, &CB);
1795 return;
1796 }
1797
1798 ValueLatticeElement CondVal = getValueState(OtherOp);
1799 ValueLatticeElement &IV = ValueState[&CB];
1800 if (CondVal.isConstantRange() || CopyOfVal.isConstantRange()) {
1801 auto ImposedCR =
1802 ConstantRange::getFull(DL.getTypeSizeInBits(CopyOf->getType()));
1803
1804 // Get the range imposed by the condition.
1805 if (CondVal.isConstantRange())
1806 ImposedCR = ConstantRange::makeAllowedICmpRegion(
1807 Pred, CondVal.getConstantRange());
1808
1809 // Combine range info for the original value with the new range from the
1810 // condition.
1811 auto CopyOfCR = CopyOfVal.asConstantRange(CopyOf->getType(),
1812 /*UndefAllowed=*/true);
1813 // Treat an unresolved input like a full range.
1814 if (CopyOfCR.isEmptySet())
1815 CopyOfCR = ConstantRange::getFull(CopyOfCR.getBitWidth());
1816 auto NewCR = ImposedCR.intersectWith(CopyOfCR);
1817 // If the existing information is != x, do not use the information from
1818 // a chained predicate, as the != x information is more likely to be
1819 // helpful in practice.
1820 if (!CopyOfCR.contains(NewCR) && CopyOfCR.getSingleMissingElement())
1821 NewCR = CopyOfCR;
1822
1823       // The new range is based on a branch condition. That guarantees that
1824       // neither of the compare operands can be undef in the branch targets,
1825       // unless we have conditions that are always true/false (e.g. an icmp ule
1826       // against the maximum value). For the latter an overdefined/empty range
1827       // will be inferred, but the branch will get folded accordingly anyway.
1828 addAdditionalUser(OtherOp, &CB);
1829 mergeInValue(
1830 IV, &CB,
1831 ValueLatticeElement::getRange(NewCR, /*MayIncludeUndef*/ false));
1832 return;
1833 } else if (Pred == CmpInst::ICMP_EQ &&
1834 (CondVal.isConstant() || CondVal.isNotConstant())) {
1835 // For non-integer values or integer constant expressions, only
1836 // propagate equal constants or not-constants.
1837 addAdditionalUser(OtherOp, &CB);
1838 mergeInValue(IV, &CB, CondVal);
1839 return;
1840 } else if (Pred == CmpInst::ICMP_NE && CondVal.isConstant()) {
1841 // Propagate inequalities.
1842 addAdditionalUser(OtherOp, &CB);
1843 mergeInValue(IV, &CB,
1844 ValueLatticeElement::getNot(CondVal.getConstant()));
1845 return;
1846 }
1847
1848 return (void)mergeInValue(IV, &CB, CopyOfVal);
1849 }
1850
1851 if (ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
1852 // Compute result range for intrinsics supported by ConstantRange.
1853 // Do this even if we don't know a range for all operands, as we may
1854 // still know something about the result range, e.g. of abs(x).
1855 SmallVector<ConstantRange, 2> OpRanges;
1856 for (Value *Op : II->args()) {
1857 const ValueLatticeElement &State = getValueState(Op);
1858 if (State.isUnknownOrUndef())
1859 return;
1860 OpRanges.push_back(
1861 State.asConstantRange(Op->getType(), /*UndefAllowed=*/false));
1862 }
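
      // For example (illustrative): if %x has the range [-8, 8), then
      // "llvm.abs.i32" of %x is given the range [0, 9) here instead of
      // overdefined.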
1863
1864 ConstantRange Result =
1865 ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges);
1866 return (void)mergeInValue(II, ValueLatticeElement::getRange(Result));
1867 }
1868 }
1869
1870   // The common case is that we aren't tracking the callee, either because we
1871   // are not doing interprocedural analysis or because the callee is indirect
1872   // or external. Handle these cases first.
1873 if (!F || F->isDeclaration())
1874 return handleCallOverdefined(CB);
1875
1876 // If this is a single/zero retval case, see if we're tracking the function.
1877 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
1878 if (!MRVFunctionsTracked.count(F))
1879 return handleCallOverdefined(CB); // Not tracking this callee.
1880
1881 // If we are tracking this callee, propagate the result of the function
1882 // into this call site.
1883 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1884 mergeInValue(getStructValueState(&CB, i), &CB,
1885 TrackedMultipleRetVals[std::make_pair(F, i)],
1886 getMaxWidenStepsOpts());
1887 } else {
1888 auto TFRVI = TrackedRetVals.find(F);
1889 if (TFRVI == TrackedRetVals.end())
1890 return handleCallOverdefined(CB); // Not tracking this callee.
1891
1892 // If so, propagate the return value of the callee into this call result.
1893 mergeInValue(&CB, TFRVI->second, getMaxWidenStepsOpts());
1894 }
1895 }
1896
1897 void SCCPInstVisitor::solve() {
1898 // Process the work lists until they are empty!
1899 while (!BBWorkList.empty() || !InstWorkList.empty() ||
1900 !OverdefinedInstWorkList.empty()) {
1901     // Process the overdefined instructions' work list first, which drives other
1902     // things to overdefined more quickly.
1903 while (!OverdefinedInstWorkList.empty()) {
1904 Value *I = OverdefinedInstWorkList.pop_back_val();
1905 Invalidated.erase(I);
1906
1907 LLVM_DEBUG(dbgs() << "\nPopped off OI-WL: " << *I << '\n');
1908
1909       // "I" got into the work list because it either made the transition from
1910       // bottom to constant, or to overdefined.
1911       //
1912       // Anything on this worklist that is overdefined need not be visited,
1913       // since all of its users will have already been marked as overdefined.
1914       // Update all of the users of this instruction's value.
1915       //
1916 markUsersAsChanged(I);
1917 }
1918
1919 // Process the instruction work list.
1920 while (!InstWorkList.empty()) {
1921 Value *I = InstWorkList.pop_back_val();
1922 Invalidated.erase(I);
1923
1924 LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');
1925
1926 // "I" got into the work list because it made the transition from undef to
1927 // constant.
1928 //
1929 // Anything on this worklist that is overdefined need not be visited
1930 // since all of its users will have already been marked as overdefined.
1931 // Update all of the users of this instruction's value.
1932 //
1933 if (I->getType()->isStructTy() || !getValueState(I).isOverdefined())
1934 markUsersAsChanged(I);
1935 }
1936
1937 // Process the basic block work list.
1938 while (!BBWorkList.empty()) {
1939 BasicBlock *BB = BBWorkList.pop_back_val();
1940
1941 LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');
1942
1943 // Notify all instructions in this basic block that they are newly
1944 // executable.
1945 visit(BB);
1946 }
1947 }
1948 }
1949
1950 bool SCCPInstVisitor::resolvedUndef(Instruction &I) {
1951 // Look for instructions which produce undef values.
1952 if (I.getType()->isVoidTy())
1953 return false;
1954
1955 if (auto *STy = dyn_cast<StructType>(I.getType())) {
1956     // Only a few kinds of struct-producing instructions matter for undef resolution.
1957
1958 // Tracked calls must never be marked overdefined in resolvedUndefsIn.
1959 if (auto *CB = dyn_cast<CallBase>(&I))
1960 if (Function *F = CB->getCalledFunction())
1961 if (MRVFunctionsTracked.count(F))
1962 return false;
1963
1964 // extractvalue and insertvalue don't need to be marked; they are
1965 // tracked as precisely as their operands.
1966 if (isa<ExtractValueInst>(I) || isa<InsertValueInst>(I))
1967 return false;
1968 // Send the results of everything else to overdefined. We could be
1969 // more precise than this but it isn't worth bothering.
1970 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1971 ValueLatticeElement &LV = getStructValueState(&I, i);
1972 if (LV.isUnknown()) {
1973 markOverdefined(LV, &I);
1974 return true;
1975 }
1976 }
1977 return false;
1978 }
1979
1980 ValueLatticeElement &LV = getValueState(&I);
1981 if (!LV.isUnknown())
1982 return false;
1983
1984   // There are two reasons a call can have an undef result:
1985 // 1. It could be tracked.
1986 // 2. It could be constant-foldable.
1987 // Because of the way we solve return values, tracked calls must
1988 // never be marked overdefined in resolvedUndefsIn.
1989 if (auto *CB = dyn_cast<CallBase>(&I))
1990 if (Function *F = CB->getCalledFunction())
1991 if (TrackedRetVals.count(F))
1992 return false;
1993
1994 if (isa<LoadInst>(I)) {
1995     // A load here means one of two things: a load of undef from a global or
1996     // a load from an unknown pointer. Either way, having it return undef
1997     // is okay.
1998 return false;
1999 }
2000
2001 markOverdefined(&I);
2002 return true;
2003 }
2004
2005 /// While solving the dataflow for a function, we don't compute a result for
2006 /// operations with an undef operand, to allow undef to be lowered to a
2007 /// constant later. For example, constant folding of "zext i8 undef to i16"
2008 /// would result in "i16 0", and if undef is later lowered to "i8 1", then the
2009 /// zext result would become "i16 1" and would result in an overdefined
2010 /// lattice value once merged with the previous result. Not computing the
2011 /// result of the zext (treating undef the same as unknown) allows us to handle
2012 /// a later undef->constant lowering more optimally.
2013 ///
2014 /// However, if the operand remains undef when the solver returns, we do need
2015 /// to assign some result to the instruction (otherwise we would treat it as
2016 /// unreachable). For simplicity, we mark any instructions that are still
2017 /// unknown as overdefined.
2018 bool SCCPInstVisitor::resolvedUndefsIn(Function &F) {
2019 bool MadeChange = false;
2020 for (BasicBlock &BB : F) {
2021 if (!BBExecutable.count(&BB))
2022 continue;
2023
2024 for (Instruction &I : BB)
2025 MadeChange |= resolvedUndef(I);
2026 }
2027
2028 LLVM_DEBUG(if (MadeChange) dbgs()
2029 << "\nResolved undefs in " << F.getName() << '\n');
2030
2031 return MadeChange;
2032 }
2033
2034 //===----------------------------------------------------------------------===//
2035 //
2036 // SCCPSolver implementations
2037 //
2038 SCCPSolver::SCCPSolver(
2039 const DataLayout &DL,
2040 std::function<const TargetLibraryInfo &(Function &)> GetTLI,
2041 LLVMContext &Ctx)
2042 : Visitor(new SCCPInstVisitor(DL, std::move(GetTLI), Ctx)) {}
2043
2044 SCCPSolver::~SCCPSolver() = default;
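
// Illustrative driver sketch (an assumption about typical client code, not
// part of this file): a pass constructs the solver, seeds it, and alternates
// solving with undef resolution until a fixpoint is reached.
//
//   SCCPSolver Solver(DL, GetTLI, F.getContext());
//   Solver.markBlockExecutable(&F.front());
//   for (Argument &A : F.args())
//     Solver.markOverdefined(&A);
//   bool ResolvedUndefs = true;
//   while (ResolvedUndefs) {
//     Solver.solve();
//     ResolvedUndefs = Solver.resolvedUndefsIn(F);
//   }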
2045
2046 void SCCPSolver::addPredicateInfo(Function &F, DominatorTree &DT,
2047 AssumptionCache &AC) {
2048 Visitor->addPredicateInfo(F, DT, AC);
2049 }
2050
2051 bool SCCPSolver::markBlockExecutable(BasicBlock *BB) {
2052 return Visitor->markBlockExecutable(BB);
2053 }
2054
2055 const PredicateBase *SCCPSolver::getPredicateInfoFor(Instruction *I) {
2056 return Visitor->getPredicateInfoFor(I);
2057 }
2058
2059 void SCCPSolver::trackValueOfGlobalVariable(GlobalVariable *GV) {
2060 Visitor->trackValueOfGlobalVariable(GV);
2061 }
2062
2063 void SCCPSolver::addTrackedFunction(Function *F) {
2064 Visitor->addTrackedFunction(F);
2065 }
2066
2067 void SCCPSolver::addToMustPreserveReturnsInFunctions(Function *F) {
2068 Visitor->addToMustPreserveReturnsInFunctions(F);
2069 }
2070
2071 bool SCCPSolver::mustPreserveReturn(Function *F) {
2072 return Visitor->mustPreserveReturn(F);
2073 }
2074
2075 void SCCPSolver::addArgumentTrackedFunction(Function *F) {
2076 Visitor->addArgumentTrackedFunction(F);
2077 }
2078
2079 bool SCCPSolver::isArgumentTrackedFunction(Function *F) {
2080 return Visitor->isArgumentTrackedFunction(F);
2081 }
2082
2083 void SCCPSolver::solve() { Visitor->solve(); }
2084
2085 bool SCCPSolver::resolvedUndefsIn(Function &F) {
2086 return Visitor->resolvedUndefsIn(F);
2087 }
2088
2089 void SCCPSolver::solveWhileResolvedUndefsIn(Module &M) {
2090 Visitor->solveWhileResolvedUndefsIn(M);
2091 }
2092
2093 void
2094 SCCPSolver::solveWhileResolvedUndefsIn(SmallVectorImpl<Function *> &WorkList) {
2095 Visitor->solveWhileResolvedUndefsIn(WorkList);
2096 }
2097
2098 void SCCPSolver::solveWhileResolvedUndefs() {
2099 Visitor->solveWhileResolvedUndefs();
2100 }
2101
2102 bool SCCPSolver::isBlockExecutable(BasicBlock *BB) const {
2103 return Visitor->isBlockExecutable(BB);
2104 }
2105
2106 bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) const {
2107 return Visitor->isEdgeFeasible(From, To);
2108 }
2109
2110 std::vector<ValueLatticeElement>
2111 SCCPSolver::getStructLatticeValueFor(Value *V) const {
2112 return Visitor->getStructLatticeValueFor(V);
2113 }
2114
2115 void SCCPSolver::removeLatticeValueFor(Value *V) {
2116 return Visitor->removeLatticeValueFor(V);
2117 }
2118
2119 void SCCPSolver::resetLatticeValueFor(CallBase *Call) {
2120 Visitor->resetLatticeValueFor(Call);
2121 }
2122
2123 const ValueLatticeElement &SCCPSolver::getLatticeValueFor(Value *V) const {
2124 return Visitor->getLatticeValueFor(V);
2125 }
2126
2127 const MapVector<Function *, ValueLatticeElement> &
2128 SCCPSolver::getTrackedRetVals() {
2129 return Visitor->getTrackedRetVals();
2130 }
2131
2132 const DenseMap<GlobalVariable *, ValueLatticeElement> &
2133 SCCPSolver::getTrackedGlobals() {
2134 return Visitor->getTrackedGlobals();
2135 }
2136
2137 const SmallPtrSet<Function *, 16> SCCPSolver::getMRVFunctionsTracked() {
2138 return Visitor->getMRVFunctionsTracked();
2139 }
2140
2141 void SCCPSolver::markOverdefined(Value *V) { Visitor->markOverdefined(V); }
2142
2143 void SCCPSolver::trackValueOfArgument(Argument *V) {
2144 Visitor->trackValueOfArgument(V);
2145 }
2146
2147 bool SCCPSolver::isStructLatticeConstant(Function *F, StructType *STy) {
2148 return Visitor->isStructLatticeConstant(F, STy);
2149 }
2150
2151 Constant *SCCPSolver::getConstant(const ValueLatticeElement &LV,
2152 Type *Ty) const {
2153 return Visitor->getConstant(LV, Ty);
2154 }
2155
2156 Constant *SCCPSolver::getConstantOrNull(Value *V) const {
2157 return Visitor->getConstantOrNull(V);
2158 }
2159
2160 SmallPtrSetImpl<Function *> &SCCPSolver::getArgumentTrackedFunctions() {
2161 return Visitor->getArgumentTrackedFunctions();
2162 }
2163
2164 void SCCPSolver::setLatticeValueForSpecializationArguments(Function *F,
2165 const SmallVectorImpl<ArgInfo> &Args) {
2166 Visitor->setLatticeValueForSpecializationArguments(F, Args);
2167 }
2168
2169 void SCCPSolver::markFunctionUnreachable(Function *F) {
2170 Visitor->markFunctionUnreachable(F);
2171 }
2172
2173 void SCCPSolver::visit(Instruction *I) { Visitor->visit(I); }
2174
2175 void SCCPSolver::visitCall(CallInst &I) { Visitor->visitCall(I); }
2176