//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstdint>

#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif

#include <cassert>
#include <optional>
#include <string>

using namespace llvm;

#define DEBUG_TYPE "attributor"
#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"

DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
              "Determine what attributes are manifested in the IR");
STATISTIC(NumFnDeleted, "Number of functions deleted");
STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");

// TODO: Determine a good default value.
//
// In the LLVM test suite and SPEC2006, 32 iterations seem not to induce
// compile-time overhead (when run with the first 5 abstract attributes). The
// results also indicate that we never reach 32 iterations but always find a
// fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    SetFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));

static cl::opt<unsigned, true> MaxInitializationChainLengthX(
    "attributor-max-initialization-chain-length", cl::Hidden,
    cl::desc(
        "Maximal number of chained initializations (to avoid stack overflows)"),
    cl::location(MaxInitializationChainLength), cl::init(1024));
unsigned llvm::MaxInitializationChainLength;

static cl::opt<bool> AnnotateDeclarationCallSites(
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<bool>
    AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
                         cl::desc("Allow the Attributor to create shallow "
                                  "wrappers for non-exact definitions."),
                         cl::init(false));

static cl::opt<bool>
    AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
                     cl::desc("Allow the Attributor to use IP information "
                              "derived from non-exact functions via cloning"),
                     cl::init(false));

// These options can only be used in debug builds.
#ifndef NDEBUG
static cl::list<std::string>
    SeedAllowList("attributor-seed-allow-list", cl::Hidden,
                  cl::desc("Comma separated list of attribute names that are "
                           "allowed to be seeded."),
                  cl::CommaSeparated);

static cl::list<std::string> FunctionSeedAllowList(
    "attributor-function-seed-allow-list", cl::Hidden,
    cl::desc("Comma separated list of function names that are "
             "allowed to be seeded."),
    cl::CommaSeparated);
#endif

static cl::opt<bool>
    DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
                 cl::desc("Dump the dependency graph to dot files."),
                 cl::init(false));

static cl::opt<std::string> DepGraphDotFileNamePrefix(
    "attributor-depgraph-dot-filename-prefix", cl::Hidden,
    cl::desc("The prefix used for the CallGraph dot file names."));

static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
                                  cl::desc("View the dependency graph."),
                                  cl::init(false));

static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
                                       cl::desc("Print attribute dependencies"),
                                       cl::init(false));

static cl::opt<bool> EnableCallSiteSpecific(
    "attributor-enable-call-site-specific-deduction", cl::Hidden,
    cl::desc("Allow the Attributor to do call site specific analysis"),
    cl::init(false));

static cl::opt<bool>
    PrintCallGraph("attributor-print-call-graph", cl::Hidden,
                   cl::desc("Print Attributor's internal call graph"),
                   cl::init(false));

static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
                                      cl::Hidden,
                                      cl::desc("Try to simplify all loads."),
                                      cl::init(true));

/// Logic operators for the change status enum class.
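/// For example, CHANGED | UNCHANGED == CHANGED and
/// CHANGED & UNCHANGED == UNCHANGED.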
///
///{
ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::CHANGED ? L : R;
}
ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
  L = L | R;
  return L;
}
ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::UNCHANGED ? L : R;
}
ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
  L = L & R;
  return L;
}
///}

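/// Return true if module \p M targets a GPU, i.e., AMDGPU or NVPTX.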
bool AA::isGPU(const Module &M) {
  Triple T(M.getTargetTriple());
  return T.isAMDGPU() || T.isNVPTX();
}

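/// Determine if \p I is guaranteed not to synchronize with other threads
/// (nosync). Call sites are checked via attributes, known intrinsics, and
/// memory behavior; other instructions via volatility and atomic ordering.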
bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
                      const AbstractAttribute &QueryingAA) {
  // We are looking for volatile instructions or non-relaxed atomics.
  if (const auto *CB = dyn_cast<CallBase>(&I)) {
    if (CB->hasFnAttr(Attribute::NoSync))
      return true;

    // Non-convergent and readnone imply nosync.
    if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
      return true;

    if (AANoSync::isNoSyncIntrinsic(&I))
      return true;

    bool IsKnownNoSync;
    return AA::hasAssumedIRAttr<Attribute::NoSync>(
        A, &QueryingAA, IRPosition::callsite_function(*CB),
        DepClassTy::OPTIONAL, IsKnownNoSync);
  }

  if (!I.mayReadOrWriteMemory())
    return true;

  return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
}

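/// Return true if \p V is assumed to be dynamically unique, i.e., there is
/// only one run-time instance to reason about. Currently only supported for
/// the analysis-only case (see the AAInstanceInfo class comment).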
bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
                             const Value &V, bool ForAnalysisOnly) {
  // TODO: See the AAInstanceInfo class comment.
  if (!ForAnalysisOnly)
    return false;
  auto *InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
      QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
  return InstanceInfoAA && InstanceInfoAA->isAssumedUniqueForAnalysis();
}

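/// Determine the initial value of object \p Obj as a constant of type \p Ty,
/// or nullptr if it cannot be determined. Allocas yield undef, allocation
/// calls their known initial value, and global variables their (possibly
/// range-restricted) initializer.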
Constant *AA::getInitialValueForObj(Attributor &A, Value &Obj, Type &Ty,
                                    const TargetLibraryInfo *TLI,
                                    const DataLayout &DL,
                                    AA::RangeTy *RangePtr) {
  if (isa<AllocaInst>(Obj))
    return UndefValue::get(&Ty);
  if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
    return Init;
  auto *GV = dyn_cast<GlobalVariable>(&Obj);
  if (!GV)
    return nullptr;

  bool UsedAssumedInformation = false;
  Constant *Initializer = nullptr;
  if (A.hasGlobalVariableSimplificationCallback(*GV)) {
    auto AssumedGV = A.getAssumedInitializerFromCallBack(
        *GV, /* const AbstractAttribute *AA */ nullptr, UsedAssumedInformation);
    Initializer = *AssumedGV;
    if (!Initializer)
      return nullptr;
  } else {
    if (!GV->hasLocalLinkage() && !(GV->isConstant() && GV->hasInitializer()))
      return nullptr;
    if (!GV->hasInitializer())
      return UndefValue::get(&Ty);

    if (!Initializer)
      Initializer = GV->getInitializer();
  }

  if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
    APInt Offset = APInt(64, RangePtr->Offset);
    return ConstantFoldLoadFromConst(Initializer, &Ty, Offset, DL);
  }

  return ConstantFoldLoadFromUniformValue(Initializer, &Ty);
}

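/// Return true if \p V can be used in function \p Scope: constants always
/// can, instructions and arguments only within their parent function.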
bool AA::isValidInScope(const Value &V, const Function *Scope) {
  if (isa<Constant>(V))
    return true;
  if (auto *I = dyn_cast<Instruction>(&V))
    return I->getFunction() == Scope;
  if (auto *A = dyn_cast<Argument>(&V))
    return A->getParent() == Scope;
  return false;
}

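/// Return true if the value of \p VAC is valid at its context instruction,
/// i.e., it is a constant, an argument of the surrounding function, or an
/// instruction in that function that dominates the context.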
bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
                           InformationCache &InfoCache) {
  if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
    return true;
  const Function *Scope = nullptr;
  const Instruction *CtxI = VAC.getCtxI();
  if (CtxI)
    Scope = CtxI->getFunction();
  if (auto *A = dyn_cast<Argument>(VAC.getValue()))
    return A->getParent() == Scope;
  if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
    if (I->getFunction() == Scope) {
      if (const DominatorTree *DT =
              InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  *Scope))
        return DT->dominates(I, CtxI);
      // Local dominance check mostly for the old PM passes.
      if (CtxI && I->getParent() == CtxI->getParent())
        return llvm::any_of(
            make_range(I->getIterator(), I->getParent()->end()),
            [&](const Instruction &AfterI) { return &AfterI == CtxI; });
    }
  }
  return false;
}

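/// Try to convert \p V to type \p Ty without introducing new instructions,
/// returning nullptr on failure. Handles poison, undef, null values, pointer
/// casts, and constant truncations.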
Value *AA::getWithType(Value &V, Type &Ty) {
  if (V.getType() == &Ty)
    return &V;
  if (isa<PoisonValue>(V))
    return PoisonValue::get(&Ty);
  if (isa<UndefValue>(V))
    return UndefValue::get(&Ty);
  if (auto *C = dyn_cast<Constant>(&V)) {
    if (C->isNullValue())
      return Constant::getNullValue(&Ty);
    if (C->getType()->isPointerTy() && Ty.isPointerTy())
      return ConstantExpr::getPointerCast(C, &Ty);
    if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
      if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
        return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
      if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
        return ConstantExpr::getFPTrunc(C, &Ty, /* OnlyIfReduced */ true);
    }
  }
  return nullptr;
}

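/// Combine \p A and \p B in the value simplification lattice: std::nullopt
/// means "not yet known" and acts as the identity, nullptr means "not
/// simplifiable" and absorbs, and undef is subsumed by any concrete value of
/// matching type.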
std::optional<Value *>
AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
                                         const std::optional<Value *> &B,
                                         Type *Ty) {
  if (A == B)
    return A;
  if (!B)
    return A;
  if (*B == nullptr)
    return nullptr;
  if (!A)
    return Ty ? getWithType(**B, *Ty) : nullptr;
  if (*A == nullptr)
    return nullptr;
  if (!Ty)
    Ty = (*A)->getType();
  if (isa_and_nonnull<UndefValue>(*A))
    return getWithType(**B, *Ty);
  if (isa<UndefValue>(*B))
    return A;
  if (*A && *B && *A == getWithType(**B, *Ty))
    return A;
  return nullptr;
}

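/// Collect the potential copies of the memory value accessed by \p I: for a
/// load, the values that may have been stored to the underlying objects; for
/// a store, the instructions that may read the stored value. Returns false if
/// the set could not be determined completely, in which case the output
/// containers are left untouched.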
template <bool IsLoad, typename Ty>
static bool getPotentialCopiesOfMemoryValue(
    Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
                    << " (only exact: " << OnlyExact << ")\n";);

  Value &Ptr = *I.getPointerOperand();
  // Containers to remember the pointer infos and new copies while we are not
  // sure that we can find all of them. If we abort we want to avoid spurious
  // dependences and potential copies in the provided container.
  SmallVector<const AAPointerInfo *> PIs;
  SmallVector<Value *> NewCopies;
  SmallVector<Instruction *> NewCopyOrigins;

  const auto *TLI =
      A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());

  auto Pred = [&](Value &Obj) {
    LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
    if (isa<UndefValue>(&Obj))
      return true;
    if (isa<ConstantPointerNull>(&Obj)) {
      // A null pointer access can be undefined but any offset from null may
      // be OK. We do not try to optimize the latter.
      if (!NullPointerIsDefined(I.getFunction(),
                                Ptr.getType()->getPointerAddressSpace()) &&
          A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
                                 AA::Interprocedural) == &Obj)
        return true;
      LLVM_DEBUG(
          dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
      return false;
    }
    // TODO: Use assumed noalias return.
    if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
        !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
      LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
                        << "\n";);
      return false;
    }
    if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
      if (!GV->hasLocalLinkage() &&
          !(GV->isConstant() && GV->hasInitializer())) {
        LLVM_DEBUG(dbgs() << "Underlying object is global with external "
                             "linkage, not supported yet: "
                          << Obj << "\n";);
        return false;
      }

    bool NullOnly = true;
    bool NullRequired = false;
    auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
                                        bool IsExact) {
      if (!V || *V == nullptr)
        NullOnly = false;
      else if (isa<UndefValue>(*V))
        /* No op */;
      else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
        NullRequired = !IsExact;
      else
        NullOnly = false;
    };

    auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
                                      Value &V) {
      Value *AdjV = AA::getWithType(V, *I.getType());
      if (!AdjV) {
        LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
                             "cannot be converted to read type: "
                          << *Acc.getRemoteInst() << " : " << *I.getType()
                          << "\n";);
      }
      return AdjV;
    };

    auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
      if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad && Acc.isWrittenValueYetUndetermined())
        return true;
      CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
      if (OnlyExact && !IsExact && !NullOnly &&
          !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
        LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
                          << ", abort!\n");
        return false;
      }
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
                             "one, however found non-null one: "
                          << *Acc.getRemoteInst() << ", abort!\n");
        return false;
      }
      if (IsLoad) {
        assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
        if (!Acc.isWrittenValueUnknown()) {
          Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
          if (!V)
            return false;
          NewCopies.push_back(V);
          NewCopyOrigins.push_back(Acc.getRemoteInst());
          return true;
        }
        auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
        if (!SI) {
          LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
        if (!V)
          return false;
        NewCopies.push_back(V);
        NewCopyOrigins.push_back(SI);
      } else {
        assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
        auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
        if (!LI && OnlyExact) {
          LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        NewCopies.push_back(Acc.getRemoteInst());
      }
      return true;
    };

    // If the value has been written to we don't need the initial value of the
    // object.
    bool HasBeenWrittenTo = false;

    AA::RangeTy Range;
    auto *PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
                                         DepClassTy::NONE);
    if (!PI ||
        !PI->forallInterferingAccesses(A, QueryingAA, I,
                                       /* FindInterferingWrites */ IsLoad,
                                       /* FindInterferingReads */ !IsLoad,
                                       CheckAccess, HasBeenWrittenTo, Range)) {
      LLVM_DEBUG(
          dbgs()
          << "Failed to verify all interfering accesses for underlying object: "
          << Obj << "\n");
      return false;
    }

    if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
      const DataLayout &DL = A.getDataLayout();
      Value *InitialValue =
          AA::getInitialValueForObj(A, Obj, *I.getType(), TLI, DL, &Range);
      if (!InitialValue) {
        LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
                             "underlying object, abort!\n");
        return false;
      }
      CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
                             "null or undef, abort!\n");
        return false;
      }

      NewCopies.push_back(InitialValue);
      NewCopyOrigins.push_back(nullptr);
    }

    PIs.push_back(PI);

    return true;
  };

  const auto *AAUO = A.getAAFor<AAUnderlyingObjects>(
      QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
  if (!AAUO || !AAUO->forallUnderlyingObjects(Pred)) {
    LLVM_DEBUG(
        dbgs() << "Underlying objects stored into could not be determined\n";);
    return false;
  }

  // Only if we were successful in collecting all potential copies do we record
  // dependences (on AAPointerInfo AAs that are not at a fixpoint yet). We also
  // only then modify the given PotentialCopies container.
  for (const auto *PI : PIs) {
    if (!PI->getState().isAtFixpoint())
      UsedAssumedInformation = true;
    A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
  }
  PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
  PotentialValueOrigins.insert(NewCopyOrigins.begin(), NewCopyOrigins.end());

  return true;
}

bool AA::getPotentiallyLoadedValues(
    Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
      A, LI, PotentialValues, PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}

bool AA::getPotentialCopiesOfStoredValue(
    Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  SmallSetVector<Instruction *, 4> PotentialValueOrigins;
  return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
      A, SI, PotentialCopies, PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}

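/// Helper to determine if \p IRP is assumed readonly (or readnone if
/// \p RequireReadNone is set), consulting IR attributes, AAMemoryLocation,
/// and AAMemoryBehavior. \p IsKnown is set if the property is known rather
/// than merely assumed.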
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
                                        const AbstractAttribute &QueryingAA,
                                        bool RequireReadNone, bool &IsKnown) {
  if (RequireReadNone) {
    if (AA::hasAssumedIRAttr<Attribute::ReadNone>(
            A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
            /* IgnoreSubsumingPositions */ true))
      return true;
  } else if (AA::hasAssumedIRAttr<Attribute::ReadOnly>(
                 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
                 /* IgnoreSubsumingPositions */ true))
    return true;

  IRPosition::Kind Kind = IRP.getPositionKind();
  if (Kind == IRPosition::IRP_FUNCTION || Kind == IRPosition::IRP_CALL_SITE) {
    const auto *MemLocAA =
        A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
    if (MemLocAA && MemLocAA->isAssumedReadNone()) {
      IsKnown = MemLocAA->isKnownReadNone();
      if (!IsKnown)
        A.recordDependence(*MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
      return true;
    }
  }

  const auto *MemBehaviorAA =
      A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
  if (MemBehaviorAA &&
      (MemBehaviorAA->isAssumedReadNone() ||
       (!RequireReadNone && MemBehaviorAA->isAssumedReadOnly()))) {
    IsKnown = RequireReadNone ? MemBehaviorAA->isKnownReadNone()
                              : MemBehaviorAA->isKnownReadOnly();
    if (!IsKnown)
      A.recordDependence(*MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
    return true;
  }

  return false;
}

bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ false, IsKnown);
}
bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ true, IsKnown);
}

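/// Return true if the instruction \p ToI (or, if \p ToI is null, any part of
/// \p ToFn) may be reachable from \p FromI. The search walks forward
/// intraprocedurally and, where \p GoBackwardsCB permits, backwards through
/// the call sites of the current function.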
618 
619 static bool
620 isPotentiallyReachable(Attributor &A, const Instruction &FromI,
621                        const Instruction *ToI, const Function &ToFn,
622                        const AbstractAttribute &QueryingAA,
623                        const AA::InstExclusionSetTy *ExclusionSet,
624                        std::function<bool(const Function &F)> GoBackwardsCB) {
625   DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
626     dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
627            << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
628            << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
629            << "]\n";
630     if (ExclusionSet)
631       for (auto *ES : *ExclusionSet)
632         dbgs() << *ES << "\n";
633   });
634 
635   // We know kernels (generally) cannot be called from within the module. Thus,
636   // for reachability we would need to step back from a kernel which would allow
637   // us to reach anything anyway. Even if a kernel is invoked from another
638   // kernel, values like allocas and shared memory are not accessible. We
639   // implicitly check for this situation to avoid costly lookups.
640   if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
641       !GoBackwardsCB(*FromI.getFunction()) && ToFn.hasFnAttribute("kernel") &&
642       FromI.getFunction()->hasFnAttribute("kernel")) {
643     LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
644                          "module; success\n";);
645     return false;
646   }
647 
648   // If we can go arbitrarily backwards we will eventually reach an entry point
649   // that can reach ToI. Only if a set of blocks through which we cannot go is
650   // provided, or once we track internal functions not accessible from the
651   // outside, it makes sense to perform backwards analysis in the absence of a
652   // GoBackwardsCB.
653   if (!GoBackwardsCB && !ExclusionSet) {
654     LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
655                       << " is not checked backwards and does not have an "
656                          "exclusion set, abort\n");
657     return true;
658   }
659 
660   SmallPtrSet<const Instruction *, 8> Visited;
661   SmallVector<const Instruction *> Worklist;
662   Worklist.push_back(&FromI);
663 
664   while (!Worklist.empty()) {
665     const Instruction *CurFromI = Worklist.pop_back_val();
666     if (!Visited.insert(CurFromI).second)
667       continue;
668 
669     const Function *FromFn = CurFromI->getFunction();
670     if (FromFn == &ToFn) {
671       if (!ToI)
672         return true;
673       LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
674                         << " intraprocedurally\n");
675       const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
676           QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
677       bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
678                                            A, *CurFromI, *ToI, ExclusionSet);
679       LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
680                         << (Result ? "can potentially " : "cannot ") << "reach "
681                         << *ToI << " [Intra]\n");
682       if (Result)
683         return true;
684     }
685 
686     bool Result = true;
687     if (!ToFn.isDeclaration() && ToI) {
688       const auto *ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
689           QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
690       const Instruction &EntryI = ToFn.getEntryBlock().front();
691       Result = !ToReachabilityAA || ToReachabilityAA->isAssumedReachable(
692                                         A, EntryI, *ToI, ExclusionSet);
693       LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
694                         << " " << (Result ? "can potentially " : "cannot ")
695                         << "reach @" << *ToI << " [ToFn]\n");
696     }
697 
698     if (Result) {
      // The entry of ToFn can reach the instruction ToI. Check if the current
      // instruction is already known to reach ToFn.
      const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
          QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
      Result = !FnReachabilityAA || FnReachabilityAA->instructionCanReach(
                                        A, *CurFromI, ToFn, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << ToFn.getName() << " [FromFn]\n");
      if (Result)
        return true;
    }

    // TODO: Check assumed nounwind.
    const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
        QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
    auto ReturnInstCB = [&](Instruction &Ret) {
      bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
                                           A, *CurFromI, Ret, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << Ret << " [Intra]\n");
      return !Result;
    };

    // Check if we can reach returns.
    bool UsedAssumedInformation = false;
    if (A.checkForAllInstructions(ReturnInstCB, FromFn, QueryingAA,
                                  {Instruction::Ret}, UsedAssumedInformation)) {
      LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
      continue;
    }

    if (!GoBackwardsCB) {
      LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                        << " is not checked backwards, abort\n");
      return true;
    }

    // If we do not go backwards from the FromFn we are done here and so far we
    // could not find a way to reach ToFn/ToI.
    if (!GoBackwardsCB(*FromFn))
      continue;

    LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
                      << FromFn->getName() << "\n");

    auto CheckCallSite = [&](AbstractCallSite ACS) {
      CallBase *CB = ACS.getInstruction();
      if (!CB)
        return false;

      if (isa<InvokeInst>(CB))
        return false;

      Instruction *Inst = CB->getNextNonDebugInstruction();
      Worklist.push_back(Inst);
      return true;
    };

    Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
                                     /* RequireAllCallSites */ true,
                                     &QueryingAA, UsedAssumedInformation);
    if (Result) {
      LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
                        << " in @" << FromFn->getName()
                        << " failed, give up\n");
      return true;
    }

    LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
                      << " in @" << FromFn->getName()
                      << " worklist size is: " << Worklist.size() << "\n");
  }
  return false;
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Instruction &ToI,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  const Function *ToFn = ToI.getFunction();
  return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Function &ToFn,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}

bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
                                    const AbstractAttribute &QueryingAA) {
  if (isa<UndefValue>(Obj))
    return true;
  if (isa<AllocaInst>(Obj)) {
    InformationCache &InfoCache = A.getInfoCache();
    if (!InfoCache.stackIsAccessibleByOtherThreads()) {
      LLVM_DEBUG(
          dbgs() << "[AA] Object '" << Obj
                 << "' is thread local; stack objects are thread local.\n");
      return true;
    }
    bool IsKnownNoCapture;
    bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
        A, &QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL,
        IsKnownNoCapture);
    LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
                      << (IsAssumedNoCapture ? "" : "not") << " thread local; "
                      << (IsAssumedNoCapture ? "non-" : "")
                      << "captured stack object.\n");
    return IsAssumedNoCapture;
  }
  if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
    if (GV->isConstant()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; constant global\n");
      return true;
    }
    if (GV->isThreadLocal()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; thread local global\n");
      return true;
    }
  }

  if (A.getInfoCache().targetIsGPU()) {
    if (Obj.getType()->getPointerAddressSpace() ==
        (int)AA::GPUAddressSpace::Local) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU local memory\n");
      return true;
    }
    if (Obj.getType()->getPointerAddressSpace() ==
        (int)AA::GPUAddressSpace::Constant) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU constant memory\n");
      return true;
    }
  }

  LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
  return false;
}

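/// Return true if the memory accessed by \p I may be observed by other
/// threads and thus a barrier could affect \p I; accesses to thread-local
/// objects never require barriers.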
bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
                                        const AbstractAttribute &QueryingAA) {
  if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
    return false;

  SmallSetVector<const Value *, 8> Ptrs;

  auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
    if (!Loc || !Loc->Ptr) {
      LLVM_DEBUG(
          dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
      return false;
    }
    Ptrs.insert(Loc->Ptr);
    return true;
  };

  if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
    if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
      return true;
    if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
      if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
        return true;
  } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
    return true;

  return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
}

bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
                                        ArrayRef<const Value *> Ptrs,
                                        const AbstractAttribute &QueryingAA,
                                        const Instruction *CtxI) {
  for (const Value *Ptr : Ptrs) {
    if (!Ptr) {
      LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
      return true;
    }

    auto Pred = [&](Value &Obj) {
      if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
        return true;
      LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
                        << "'; -> requires barrier\n");
      return false;
    };

    const auto *UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
        QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
    if (!UnderlyingObjsAA || !UnderlyingObjsAA->forallUnderlyingObjects(Pred))
      return true;
  }
  return false;
}

/// Return true if \p New is equal to or worse than \p Old.
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}

/// Return true if the information provided by \p Attr was added to the
/// attribute set \p AttrSet. This is only the case if it was not already
/// present in \p AttrSet.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeSet AttrSet, bool ForceReplace,
                             AttrBuilder &AB) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (AttrSet.hasAttribute(Kind))
      return false;
    AB.addAttribute(Kind);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (AttrSet.hasAttribute(Kind)) {
      if (!ForceReplace)
        return false;
    }
    AB.addAttribute(Kind, Attr.getValueAsString());
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (!ForceReplace && Kind == Attribute::Memory) {
      MemoryEffects ME = Attr.getMemoryEffects() & AttrSet.getMemoryEffects();
      if (ME == AttrSet.getMemoryEffects())
        return false;
      AB.addMemoryAttr(ME);
      return true;
    }
    if (AttrSet.hasAttribute(Kind)) {
      if (!ForceReplace && isEqualOrWorse(Attr, AttrSet.getAttribute(Kind)))
        return false;
    }
    AB.addAttribute(Attr);
    return true;
  }

  llvm_unreachable("Expected enum or string attribute!");
}

Argument *IRPosition::getAssociatedArgument() const {
  if (getPositionKind() == IRP_ARGUMENT)
    return cast<Argument>(&getAnchorValue());

  // Not an Argument and no argument number means this is not a call site
  // argument, thus we cannot find a callback argument to return.
  int ArgNo = getCallSiteArgNo();
  if (ArgNo < 0)
    return nullptr;

  // Use abstract call sites to make the connection between the call site
  // values and the ones in callbacks. If a callback was found that makes use
  // of the underlying call site operand, we want the corresponding callback
  // callee argument and not the direct callee argument.
  std::optional<Argument *> CBCandidateArg;
  SmallVector<const Use *, 4> CallbackUses;
  const auto &CB = cast<CallBase>(getAnchorValue());
  AbstractCallSite::getCallbackUses(CB, CallbackUses);
  for (const Use *U : CallbackUses) {
    AbstractCallSite ACS(U);
    assert(ACS && ACS.isCallbackCall());
    if (!ACS.getCalledFunction())
      continue;

    for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {

      // Test if the underlying call site operand is argument number u of the
      // callback callee.
      if (ACS.getCallArgOperandNo(u) != ArgNo)
        continue;

      assert(ACS.getCalledFunction()->arg_size() > u &&
             "ACS mapped into var-args arguments!");
      if (CBCandidateArg) {
        CBCandidateArg = nullptr;
        break;
      }
      CBCandidateArg = ACS.getCalledFunction()->getArg(u);
    }
  }

  // If we found a unique callback candidate argument, return it.
  if (CBCandidateArg && *CBCandidateArg)
    return *CBCandidateArg;

  // If no callbacks were found, or none used the underlying call site operand
  // exclusively, use the direct callee argument if available.
  auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
  if (Callee && Callee->arg_size() > unsigned(ArgNo))
    return Callee->getArg(ArgNo);

  return nullptr;
}

ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}

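/// Collect attributes of kind \p AK for \p IRP that are justified by
/// llvm.assume operand bundles found in the must-be-executed context of the
/// position. Returns true if any attribute was added to \p Attrs.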
bool Attributor::getAttrsFromAssumes(const IRPosition &IRP,
                                     Attribute::AttrKind AK,
                                     SmallVectorImpl<Attribute> &Attrs) {
  assert(IRP.getPositionKind() != IRPosition::IRP_INVALID &&
         "Did expect a valid position!");
  MustBeExecutedContextExplorer *Explorer =
      getInfoCache().getMustBeExecutedContextExplorer();
  if (!Explorer)
    return false;

  Value &AssociatedValue = IRP.getAssociatedValue();

  const Assume2KnowledgeMap &A2K =
      getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});

  // Check if we found any potential assume use; if not, we don't need to
  // create explorer iterators.
  if (A2K.empty())
    return false;

  LLVMContext &Ctx = AssociatedValue.getContext();
  unsigned AttrsSize = Attrs.size();
  auto EIt = Explorer->begin(IRP.getCtxI()),
       EEnd = Explorer->end(IRP.getCtxI());
  for (const auto &It : A2K)
    if (Explorer->findInContextOf(It.first, EIt, EEnd))
      Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
  return AttrsSize != Attrs.size();
}

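/// Apply callback \p CB to every attribute description in \p AttrDescs for
/// position \p IRP. Removals are accumulated in an AttributeMask and
/// additions in an AttrBuilder; the resulting attribute list is cached in
/// AttrsMap. Returns CHANGED if any callback requested a modification.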
template <typename DescTy>
ChangeStatus
Attributor::updateAttrMap(const IRPosition &IRP,
                          const ArrayRef<DescTy> &AttrDescs,
                          function_ref<bool(const DescTy &, AttributeSet,
                                            AttributeMask &, AttrBuilder &)>
                              CB) {
  if (AttrDescs.empty())
    return ChangeStatus::UNCHANGED;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_INVALID:
    return ChangeStatus::UNCHANGED;
  default:
    break;
  }

  AttributeList AL;
  Value *AttrListAnchor = IRP.getAttrListAnchor();
  auto It = AttrsMap.find(AttrListAnchor);
  if (It == AttrsMap.end())
    AL = IRP.getAttrList();
  else
    AL = It->getSecond();

  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  auto AttrIdx = IRP.getAttrIdx();
  AttributeSet AS = AL.getAttributes(AttrIdx);
  AttributeMask AM;
  AttrBuilder AB(Ctx);

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  for (const DescTy &AttrDesc : AttrDescs)
    if (CB(AttrDesc, AS, AM, AB))
      HasChanged = ChangeStatus::CHANGED;

  if (HasChanged == ChangeStatus::UNCHANGED)
    return ChangeStatus::UNCHANGED;

  AL = AL.removeAttributesAtIndex(Ctx, AttrIdx, AM);
  AL = AL.addAttributesAtIndex(Ctx, AttrIdx, AB);
  AttrsMap[AttrListAnchor] = AL;
  return ChangeStatus::CHANGED;
}

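/// Check if \p IRP, or any subsuming position unless
/// \p IgnoreSubsumingPositions is set, carries one of the attribute kinds in
/// \p AttrKinds. Attributes derivable from llvm.assume uses are considered as
/// well. If the attribute is only implied (e.g., found on a subsuming
/// position), \p ImpliedAttributeKind is manifested at \p IRP.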
bool Attributor::hasAttr(const IRPosition &IRP,
                         ArrayRef<Attribute::AttrKind> AttrKinds,
                         bool IgnoreSubsumingPositions,
                         Attribute::AttrKind ImpliedAttributeKind) {
  bool Implied = false;
  bool HasAttr = false;
  auto HasAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
                       AttributeMask &, AttrBuilder &) {
    if (AttrSet.hasAttribute(Kind)) {
      Implied |= Kind != ImpliedAttributeKind;
      HasAttr = true;
    }
    return false;
  };
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
    updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, HasAttrCB);
    if (HasAttr)
      break;
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
    Implied = true;
  }
  if (!HasAttr) {
    Implied = true;
    SmallVector<Attribute> Attrs;
    for (Attribute::AttrKind AK : AttrKinds)
      if (getAttrsFromAssumes(IRP, AK, Attrs)) {
        HasAttr = true;
        break;
      }
  }

  // Check if we should manifest the implied attribute kind at the IRP.
  if (ImpliedAttributeKind != Attribute::None && HasAttr && Implied)
    manifestAttrs(IRP, {Attribute::get(IRP.getAnchorValue().getContext(),
                                       ImpliedAttributeKind)});
  return HasAttr;
}

void Attributor::getAttrs(const IRPosition &IRP,
                          ArrayRef<Attribute::AttrKind> AttrKinds,
                          SmallVectorImpl<Attribute> &Attrs,
                          bool IgnoreSubsumingPositions) {
  auto CollectAttrCB = [&](const Attribute::AttrKind &Kind,
                           AttributeSet AttrSet, AttributeMask &,
                           AttrBuilder &) {
    if (AttrSet.hasAttribute(Kind))
      Attrs.push_back(AttrSet.getAttribute(Kind));
    return false;
  };
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
    updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, CollectAttrCB);
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  for (Attribute::AttrKind AK : AttrKinds)
    getAttrsFromAssumes(IRP, AK, Attrs);
}

ChangeStatus
Attributor::removeAttrs(const IRPosition &IRP,
                        const ArrayRef<Attribute::AttrKind> &AttrKinds) {
  auto RemoveAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
                          AttributeMask &AM, AttrBuilder &) {
    if (!AttrSet.hasAttribute(Kind))
      return false;
    AM.addAttribute(Kind);
    return true;
  };
  return updateAttrMap<Attribute::AttrKind>(IRP, AttrKinds, RemoveAttrCB);
}

ChangeStatus Attributor::manifestAttrs(const IRPosition &IRP,
                                       const ArrayRef<Attribute> &Attrs,
                                       bool ForceReplace) {
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  auto AddAttrCB = [&](const Attribute &Attr, AttributeSet AttrSet,
                       AttributeMask &, AttrBuilder &AB) {
    return addIfNotExistent(Ctx, Attr, AttrSet, ForceReplace, AB);
  };
  return updateAttrMap<Attribute>(IRP, Attrs, AddAttrCB);
}

const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
const IRPosition
    IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());

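/// Populate the list of positions that subsume \p IRP, e.g., the function
/// position for an argument position, or callee positions for call site
/// positions.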
SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  // Helper to determine if operand bundles on a call site are benign or
  // potentially problematic. We handle only llvm.assume for now.
  auto CanIgnoreOperandBundles = [](const CallBase &CB) {
    return (isa<IntrinsicInst>(CB) &&
            cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic::assume);
  };

  const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
      if (auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand()))
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      if (auto *Callee =
              dyn_cast_if_present<Function>(CB->getCalledOperand())) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
        for (const Argument &Arg : Callee->args())
          if (Arg.hasReturnedAttr()) {
            IRPositions.emplace_back(
                IRPosition::callsite_argument(*CB, Arg.getArgNo()));
            IRPositions.emplace_back(
                IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
            IRPositions.emplace_back(IRPosition::argument(Arg));
          }
      }
    }
    IRPositions.emplace_back(IRPosition::callsite_function(*CB));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
      if (Callee) {
        if (Argument *Arg = IRP.getAssociatedArgument())
          IRPositions.emplace_back(IRPosition::argument(*Arg));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}

void IRPosition::verify() {
#ifdef EXPENSIVE_CHECKS
  switch (getPositionKind()) {
  case IRP_INVALID:
    assert((CBContext == nullptr) &&
           "Invalid position must not have CallBaseContext!");
    assert(!Enc.getOpaqueValue() &&
           "Expected a nullptr for an invalid position!");
    return;
  case IRP_FLOAT:
    assert((!isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for argument values!");
    return;
  case IRP_RETURNED:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_RETURNED:
    assert((CBContext == nullptr) &&
           "'call site returned' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE:
    assert((CBContext == nullptr) &&
           "'call site function' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_FUNCTION:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_ARGUMENT:
    assert(isa<Argument>(getAsValuePtr()) &&
           "Expected argument for a 'argument' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_ARGUMENT: {
    assert((CBContext == nullptr) &&
           "'call site argument' position must not have CallBaseContext!");
    Use *U = getAsUsePtr();
    (void)U; // Silence unused variable warning.
    assert(U && "Expected use for a 'call site argument' position!");
    assert(isa<CallBase>(U->getUser()) &&
           "Expected call base user for a 'call site argument' position!");
    assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
           "Expected call base argument operand for a 'call site argument' "
           "position");
    assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
               unsigned(getCallSiteArgNo()) &&
           "Argument number mismatch!");
    assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
    return;
  }
  }
#endif
}

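/// Determine the assumed constant for \p IRP, if any: std::nullopt means the
/// value is not yet known, nullptr means it is known not to simplify to a
/// constant.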
1327 std::optional<Constant *>
1328 Attributor::getAssumedConstant(const IRPosition &IRP,
1329                                const AbstractAttribute &AA,
1330                                bool &UsedAssumedInformation) {
1331   // First check all callbacks provided by outside AAs. If any of them returns
1332   // a non-null value that is different from the associated value, or
1333   // std::nullopt, we assume it's simplified.
1334   for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
1335     std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
1336     if (!SimplifiedV)
1337       return std::nullopt;
1338     if (isa_and_nonnull<Constant>(*SimplifiedV))
1339       return cast<Constant>(*SimplifiedV);
1340     return nullptr;
1341   }
1342   if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
1343     return C;
1344   SmallVector<AA::ValueAndContext> Values;
1345   if (getAssumedSimplifiedValues(IRP, &AA, Values,
1346                                  AA::ValueScope::Interprocedural,
1347                                  UsedAssumedInformation)) {
1348     if (Values.empty())
1349       return std::nullopt;
1350     if (auto *C = dyn_cast_or_null<Constant>(
1351             AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
1352       return C;
1353   }
1354   return nullptr;
1355 }
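
// Editor's note (illustrative sketch, not upstream code): a typical query of
// getAssumedConstant from inside an abstract attribute's update; `A` is the
// Attributor and `*this` the querying AA. The three possible outcomes are:
//
//   bool UsedAssumedInformation = false;
//   std::optional<Constant *> C =
//       A.getAssumedConstant(getIRPosition(), *this, UsedAssumedInformation);
//   if (!C) {
//     // std::nullopt: nothing known yet, stay optimistic.
//   } else if (*C) {
//     // Simplified to the constant *C; if UsedAssumedInformation is set,
//     // the result may still change in later fixpoint iterations.
//   } else {
//     // nullptr: the value cannot be simplified to a constant.
//   }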
1356 
1357 std::optional<Value *> Attributor::getAssumedSimplified(
1358     const IRPosition &IRP, const AbstractAttribute *AA,
1359     bool &UsedAssumedInformation, AA::ValueScope S) {
1360   // First check all callbacks provided by outside AAs. If any of them
1361   // returns std::nullopt, or a non-null value different from the associated
1362   // value, we assume the position has been simplified.
1363   for (auto &CB : SimplificationCallbacks.lookup(IRP))
1364     return CB(IRP, AA, UsedAssumedInformation);
1365 
1366   SmallVector<AA::ValueAndContext> Values;
1367   if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
1368     return &IRP.getAssociatedValue();
1369   if (Values.empty())
1370     return std::nullopt;
1371   if (AA)
1372     if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
1373       return V;
1374   if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
1375       IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
1376     return nullptr;
1377   return &IRP.getAssociatedValue();
1378 }
1379 
1380 bool Attributor::getAssumedSimplifiedValues(
1381     const IRPosition &InitialIRP, const AbstractAttribute *AA,
1382     SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
1383     bool &UsedAssumedInformation, bool RecurseForSelectAndPHI) {
1384   SmallPtrSet<Value *, 8> Seen;
1385   SmallVector<IRPosition, 8> Worklist;
1386   Worklist.push_back(InitialIRP);
1387   while (!Worklist.empty()) {
1388     const IRPosition &IRP = Worklist.pop_back_val();
1389 
1390     // First check all callbacks provided by outside AAs. If any of them
1391     // returns std::nullopt, or a non-null value different from the
1392     // associated value, we assume the position has been simplified.
1393     int NV = Values.size();
1394     const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
1395     for (const auto &CB : SimplificationCBs) {
1396       std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
1397       if (!CBResult.has_value())
1398         continue;
1399       Value *V = *CBResult;
1400       if (!V)
1401         return false;
1402       if ((S & AA::ValueScope::Interprocedural) ||
1403           AA::isValidInScope(*V, IRP.getAnchorScope()))
1404         Values.push_back(AA::ValueAndContext{*V, nullptr});
1405       else
1406         return false;
1407     }
1408     if (SimplificationCBs.empty()) {
1409       // If no high-level/outside simplification occurred, use
1410       // AAPotentialValues.
1411       const auto *PotentialValuesAA =
1412           getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
1413       if (PotentialValuesAA &&
               PotentialValuesAA->getAssumedSimplifiedValues(*this, Values, S)) {
1414         UsedAssumedInformation |= !PotentialValuesAA->isAtFixpoint();
1415       } else if (IRP.getPositionKind() != IRPosition::IRP_RETURNED) {
1416         Values.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
1417       } else {
1418         // TODO: We could visit all returns and add the operands.
1419         return false;
1420       }
1421     }
1422 
1423     if (!RecurseForSelectAndPHI)
1424       break;
1425 
1426     for (int I = NV, E = Values.size(); I < E; ++I) {
1427       Value *V = Values[I].getValue();
1428       if (!isa<PHINode>(V) && !isa<SelectInst>(V))
1429         continue;
1430       if (!Seen.insert(V).second)
1431         continue;
1432       // Move the last element to this slot.
1433       Values[I] = Values[E - 1];
1434       // Eliminate the last slot, adjust the indices.
1435       Values.pop_back();
1436       --E;
1437       --I;
1438       // Add a new value (select or phi) to the worklist.
1439       Worklist.push_back(IRPosition::value(*V));
1440     }
1441   }
1442   return true;
1443 }
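
// Editor's note (illustrative sketch, not upstream code): collecting the
// potential underlying values of some Value V, looking through selects and
// PHIs via the worklist above; `A`, `AA`, and `V` are placeholders:
//
//   SmallVector<AA::ValueAndContext> Values;
//   bool UsedAssumedInformation = false;
//   if (A.getAssumedSimplifiedValues(IRPosition::value(V), &AA, Values,
//                                    AA::ValueScope::Interprocedural,
//                                    UsedAssumedInformation,
//                                    /* RecurseForSelectAndPHI */ true))
//     for (const AA::ValueAndContext &VAC : Values)
//       ; // Each VAC.getValue() is one possible underlying value of V.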
1444 
1445 std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
1446     std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1447     bool &UsedAssumedInformation) {
1448   if (!V)
1449     return V;
1450   if (*V == nullptr || isa<Constant>(*V))
1451     return V;
1452   if (auto *Arg = dyn_cast<Argument>(*V))
1453     if (CB.getCalledOperand() == Arg->getParent() &&
1454         CB.arg_size() > Arg->getArgNo())
1455       if (!Arg->hasPointeeInMemoryValueAttr())
1456         return getAssumedSimplified(
1457             IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
1458             UsedAssumedInformation, AA::Intraprocedural);
1459   return nullptr;
1460 }
1461 
1462 Attributor::~Attributor() {
1463   // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
1464   // thus we cannot delete them. We can, and want to, destruct them though.
1465   for (auto &It : AAMap) {
1466     AbstractAttribute *AA = It.getSecond();
1467     AA->~AbstractAttribute();
1468   }
1469 }
1470 
1471 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
1472                                const AAIsDead *FnLivenessAA,
1473                                bool &UsedAssumedInformation,
1474                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
1475   if (!Configuration.UseLiveness)
1476     return false;
1477   const IRPosition &IRP = AA.getIRPosition();
1478   if (!Functions.count(IRP.getAnchorScope()))
1479     return false;
1480   return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
1481                        CheckBBLivenessOnly, DepClass);
1482 }
1483 
1484 bool Attributor::isAssumedDead(const Use &U,
1485                                const AbstractAttribute *QueryingAA,
1486                                const AAIsDead *FnLivenessAA,
1487                                bool &UsedAssumedInformation,
1488                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
1489   if (!Configuration.UseLiveness)
1490     return false;
1491   Instruction *UserI = dyn_cast<Instruction>(U.getUser());
1492   if (!UserI)
1493     return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
1494                          UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1495 
1496   if (auto *CB = dyn_cast<CallBase>(UserI)) {
1497     // For call site argument uses we can check if the argument is
1498     // unused/dead.
1499     if (CB->isArgOperand(&U)) {
1500       const IRPosition &CSArgPos =
1501           IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
1502       return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
1503                            UsedAssumedInformation, CheckBBLivenessOnly,
1504                            DepClass);
1505     }
1506   } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
1507     const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
1508     return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
1509                          UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1510   } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
1511     BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1512     return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
1513                          UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1514   } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
1515     if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1516       const IRPosition IRP = IRPosition::inst(*SI);
1517       const AAIsDead *IsDeadAA =
1518           getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1519       if (IsDeadAA && IsDeadAA->isRemovableStore()) {
1520         if (QueryingAA)
1521           recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1522         if (!IsDeadAA->isKnown(AAIsDead::IS_REMOVABLE))
1523           UsedAssumedInformation = true;
1524         return true;
1525       }
1526     }
1527   }
1528 
1529   return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
1530                        UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1531 }
1532 
1533 bool Attributor::isAssumedDead(const Instruction &I,
1534                                const AbstractAttribute *QueryingAA,
1535                                const AAIsDead *FnLivenessAA,
1536                                bool &UsedAssumedInformation,
1537                                bool CheckBBLivenessOnly, DepClassTy DepClass,
1538                                bool CheckForDeadStore) {
1539   if (!Configuration.UseLiveness)
1540     return false;
1541   const IRPosition::CallBaseContext *CBCtx =
1542       QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1543 
1544   if (ManifestAddedBlocks.contains(I.getParent()))
1545     return false;
1546 
1547   const Function &F = *I.getFunction();
1548   if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1549     FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1550                                               QueryingAA, DepClassTy::NONE);
1551 
1552   // Don't use recursive reasoning.
1553   if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1554     return false;
1555 
1556   // If we have a context instruction and a liveness AA we use it.
1557   if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1558                           : FnLivenessAA->isAssumedDead(&I)) {
1559     if (QueryingAA)
1560       recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1561     if (!FnLivenessAA->isKnownDead(&I))
1562       UsedAssumedInformation = true;
1563     return true;
1564   }
1565 
1566   if (CheckBBLivenessOnly)
1567     return false;
1568 
1569   const IRPosition IRP = IRPosition::inst(I, CBCtx);
1570   const AAIsDead *IsDeadAA =
1571       getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1572 
1573   // Don't use recursive reasoning.
1574   if (!IsDeadAA || QueryingAA == IsDeadAA)
1575     return false;
1576 
1577   if (IsDeadAA->isAssumedDead()) {
1578     if (QueryingAA)
1579       recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1580     if (!IsDeadAA->isKnownDead())
1581       UsedAssumedInformation = true;
1582     return true;
1583   }
1584 
1585   if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA->isRemovableStore()) {
1586     if (QueryingAA)
1587       recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1588     if (!IsDeadAA->isKnownDead())
1589       UsedAssumedInformation = true;
1590     return true;
1591   }
1592 
1593   return false;
1594 }
1595 
1596 bool Attributor::isAssumedDead(const IRPosition &IRP,
1597                                const AbstractAttribute *QueryingAA,
1598                                const AAIsDead *FnLivenessAA,
1599                                bool &UsedAssumedInformation,
1600                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
1601   if (!Configuration.UseLiveness)
1602     return false;
1603   // Don't check liveness for constants, e.g. functions, used as (floating)
1604   // values since the context instruction and the like are meaningless here.
1605   if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
1606       isa<Constant>(IRP.getAssociatedValue())) {
1607     return false;
1608   }
1609 
1610   Instruction *CtxI = IRP.getCtxI();
1611   if (CtxI &&
1612       isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1613                     /* CheckBBLivenessOnly */ true,
1614                     CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1615     return true;
1616 
1617   if (CheckBBLivenessOnly)
1618     return false;
1619 
1620   // If we haven't succeeded we query the specific liveness info for the IRP.
1621   const AAIsDead *IsDeadAA;
1622   if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
1623     IsDeadAA = getOrCreateAAFor<AAIsDead>(
1624         IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
1625         QueryingAA, DepClassTy::NONE);
1626   else
1627     IsDeadAA = getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1628 
1629   // Don't use recursive reasoning.
1630   if (!IsDeadAA || QueryingAA == IsDeadAA)
1631     return false;
1632 
1633   if (IsDeadAA->isAssumedDead()) {
1634     if (QueryingAA)
1635       recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1636     if (!IsDeadAA->isKnownDead())
1637       UsedAssumedInformation = true;
1638     return true;
1639   }
1640 
1641   return false;
1642 }
1643 
1644 bool Attributor::isAssumedDead(const BasicBlock &BB,
1645                                const AbstractAttribute *QueryingAA,
1646                                const AAIsDead *FnLivenessAA,
1647                                DepClassTy DepClass) {
1648   if (!Configuration.UseLiveness)
1649     return false;
1650   const Function &F = *BB.getParent();
1651   if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1652     FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
1653                                               QueryingAA, DepClassTy::NONE);
1654 
1655   // Don't use recursive reasoning.
1656   if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1657     return false;
1658 
1659   if (FnLivenessAA->isAssumedDead(&BB)) {
1660     if (QueryingAA)
1661       recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1662     return true;
1663   }
1664 
1665   return false;
1666 }
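
// Editor's note (illustrative sketch, not upstream code): the isAssumedDead
// overloads above share one pattern: consult AAIsDead, record a dependence on
// the querying AA, and raise UsedAssumedInformation whenever the verdict is
// assumed rather than known. A typical instruction-level query, assuming the
// default trailing arguments declared in Attributor.h; `A`, `AA`, and `I` are
// placeholders:
//
//   bool UsedAssumedInformation = false;
//   if (A.isAssumedDead(I, &AA, /* FnLivenessAA */ nullptr,
//                       UsedAssumedInformation))
//     ; // Skip I; if UsedAssumedInformation is set, the verdict may be
//       // revisited in a later fixpoint iteration.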
1667 
1668 bool Attributor::checkForAllUses(
1669     function_ref<bool(const Use &, bool &)> Pred,
1670     const AbstractAttribute &QueryingAA, const Value &V,
1671     bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1672     bool IgnoreDroppableUses,
1673     function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1674 
1675   // Check virtual uses first.
1676   for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1677     if (!CB(*this, &QueryingAA))
1678       return false;
1679 
1680   // Check the trivial case first as it catches void values.
1681   if (V.use_empty())
1682     return true;
1683 
1684   const IRPosition &IRP = QueryingAA.getIRPosition();
1685   SmallVector<const Use *, 16> Worklist;
1686   SmallPtrSet<const Use *, 16> Visited;
1687 
1688   auto AddUsers = [&](const Value &V, const Use *OldUse) {
1689     for (const Use &UU : V.uses()) {
1690       if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1691         LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1692                              "rejected by the equivalence callback: "
1693                           << *UU << "!\n");
1694         return false;
1695       }
1696 
1697       Worklist.push_back(&UU);
1698     }
1699     return true;
1700   };
1701 
1702   AddUsers(V, /* OldUse */ nullptr);
1703 
1704   LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1705                     << " initial uses to check\n");
1706 
1707   const Function *ScopeFn = IRP.getAnchorScope();
1708   const auto *LivenessAA =
1709       ScopeFn ? getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
1710                                    DepClassTy::NONE)
1711               : nullptr;
1712 
1713   while (!Worklist.empty()) {
1714     const Use *U = Worklist.pop_back_val();
1715     if (isa<PHINode>(U->getUser()) && !Visited.insert(U).second)
1716       continue;
1717     DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1718       if (auto *Fn = dyn_cast<Function>(U->getUser()))
1719         dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1720                << "\n";
1721       else
1722         dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1723                << "\n";
1724     });
1725     bool UsedAssumedInformation = false;
1726     if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1727                       CheckBBLivenessOnly, LivenessDepClass)) {
1728       DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1729                       dbgs() << "[Attributor] Dead use, skip!\n");
1730       continue;
1731     }
1732     if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1733       DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1734                       dbgs() << "[Attributor] Droppable user, skip!\n");
1735       continue;
1736     }
1737 
1738     if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1739       if (&SI->getOperandUse(0) == U) {
1740         if (!Visited.insert(U).second)
1741           continue;
1742         SmallSetVector<Value *, 4> PotentialCopies;
1743         if (AA::getPotentialCopiesOfStoredValue(
1744                 *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1745                 /* OnlyExact */ true)) {
1746           DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1747                           dbgs()
1748                               << "[Attributor] Value is stored, continue with "
1749                               << PotentialCopies.size()
1750                               << " potential copies instead!\n");
1751           for (Value *PotentialCopy : PotentialCopies)
1752             if (!AddUsers(*PotentialCopy, U))
1753               return false;
1754           continue;
1755         }
1756       }
1757     }
1758 
1759     bool Follow = false;
1760     if (!Pred(*U, Follow))
1761       return false;
1762     if (!Follow)
1763       continue;
1764 
1765     User &Usr = *U->getUser();
1766     AddUsers(Usr, /* OldUse */ nullptr);
1767 
1768     auto *RI = dyn_cast<ReturnInst>(&Usr);
1769     if (!RI)
1770       continue;
1771 
1772     Function &F = *RI->getFunction();
1773     auto CallSitePred = [&](AbstractCallSite ACS) {
1774       return AddUsers(*ACS.getInstruction(), U);
1775     };
1776     if (!checkForAllCallSites(CallSitePred, F, /* RequireAllCallSites */ true,
1777                               &QueryingAA, UsedAssumedInformation)) {
1778       LLVM_DEBUG(dbgs() << "[Attributor] Could not follow return instruction "
1779                            "to all call sites: "
1780                         << *RI << "\n");
1781       return false;
1782     }
1783   }
1784 
1785   return true;
1786 }
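
// Editor's note (illustrative sketch, not upstream code): a use visitor in
// the style expected by checkForAllUses. Setting Follow requests that the
// user's own uses are visited as well, e.g. to look through casts and GEPs.
// The defaults for the trailing parameters are assumed from Attributor.h;
// `A`, `AA`, `V`, and the per-use property are placeholders:
//
//   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
//     auto *UserI = dyn_cast<Instruction>(U.getUser());
//     if (!UserI)
//       return false;
//     if (isa<CastInst>(UserI) || isa<GetElementPtrInst>(UserI)) {
//       Follow = true; // Also visit the uses of the cast/GEP itself.
//       return true;
//     }
//     return !UserI->mayWriteToMemory(); // Hypothetical per-use property.
//   };
//   if (!A.checkForAllUses(UsePred, AA, V))
//     return indicatePessimisticFixpoint();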
1787 
1788 bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1789                                       const AbstractAttribute &QueryingAA,
1790                                       bool RequireAllCallSites,
1791                                       bool &UsedAssumedInformation) {
1792   // We can try to determine information from the call sites. However, this
1793   // is only possible if all call sites are known, hence the function must
1794   // have internal linkage.
1795   const IRPosition &IRP = QueryingAA.getIRPosition();
1796   const Function *AssociatedFunction = IRP.getAssociatedFunction();
1797   if (!AssociatedFunction) {
1798     LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1799                       << "\n");
1800     return false;
1801   }
1802 
1803   return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1804                               &QueryingAA, UsedAssumedInformation);
1805 }
1806 
1807 bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1808                                       const Function &Fn,
1809                                       bool RequireAllCallSites,
1810                                       const AbstractAttribute *QueryingAA,
1811                                       bool &UsedAssumedInformation,
1812                                       bool CheckPotentiallyDead) {
1813   if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1814     LLVM_DEBUG(
1815         dbgs()
1816         << "[Attributor] Function " << Fn.getName()
1817         << " has no internal linkage, hence not all call sites are known\n");
1818     return false;
1819   }
1820   // Check virtual uses first.
1821   for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1822     if (!CB(*this, QueryingAA))
1823       return false;
1824 
1825   SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
1826   for (unsigned u = 0; u < Uses.size(); ++u) {
1827     const Use &U = *Uses[u];
1828     DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1829       if (auto *Fn = dyn_cast<Function>(U))
1830         dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1831                << *U.getUser() << "\n";
1832       else
1833         dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1834                << "\n";
1835     });
1836     if (!CheckPotentiallyDead &&
1837         isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1838                       /* CheckBBLivenessOnly */ true)) {
1839       DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1840                       dbgs() << "[Attributor] Dead use, skip!\n");
1841       continue;
1842     }
1843     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
1844       if (CE->isCast() && CE->getType()->isPointerTy()) {
1845         DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1846           dbgs() << "[Attributor] Use is a constant cast expression, add "
1847                  << CE->getNumUses() << " uses of that expression instead!\n";
1848         });
1849         for (const Use &CEU : CE->uses())
1850           Uses.push_back(&CEU);
1851         continue;
1852       }
1853     }
1854 
1855     AbstractCallSite ACS(&U);
1856     if (!ACS) {
1857       LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
1858                         << " has non call site use " << *U.get() << " in "
1859                         << *U.getUser() << "\n");
1860       // BlockAddress users are allowed.
1861       if (isa<BlockAddress>(U.getUser()))
1862         continue;
1863       return false;
1864     }
1865 
1866     const Use *EffectiveUse =
1867         ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
1868     if (!ACS.isCallee(EffectiveUse)) {
1869       if (!RequireAllCallSites) {
1870         LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1871                           << " is not a call of " << Fn.getName()
1872                           << ", skip use\n");
1873         continue;
1874       }
1875       LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1876                         << " is an invalid use of " << Fn.getName() << "\n");
1877       return false;
1878     }
1879 
1880     // Make sure the arguments that can be matched between the call site and
1881     // the callee agree on their type. It is unlikely they do not, and it
1882     // doesn't make sense for all attributes to know/care about this.
1883     assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
1884     unsigned MinArgsParams =
1885         std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
1886     for (unsigned u = 0; u < MinArgsParams; ++u) {
1887       Value *CSArgOp = ACS.getCallArgOperand(u);
1888       if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
1889         LLVM_DEBUG(
1890             dbgs() << "[Attributor] Call site / callee argument type mismatch ["
1891                    << u << "@" << Fn.getName() << ": "
1892                    << *Fn.getArg(u)->getType() << " vs. "
1893                    << *ACS.getCallArgOperand(u)->getType() << "]\n");
1894         return false;
1895       }
1896     }
1897 
1898     if (Pred(ACS))
1899       continue;
1900 
1901     LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
1902                       << *ACS.getInstruction() << "\n");
1903     return false;
1904   }
1905 
1906   return true;
1907 }
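
// Editor's note (illustrative sketch, not upstream code): deducing a fact
// over all call sites of the associated function, e.g. that argument ArgNo
// (hypothetical) is a constant at every known call site:
//
//   auto CallSitePred = [&](AbstractCallSite ACS) {
//     Value *ArgOp = ACS.getCallArgOperand(ArgNo);
//     return ArgOp && isa<Constant>(ArgOp);
//   };
//   bool UsedAssumedInformation = false;
//   if (!A.checkForAllCallSites(CallSitePred, *this,
//                               /* RequireAllCallSites */ true,
//                               UsedAssumedInformation))
//     return indicatePessimisticFixpoint();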
1908 
1909 bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
1910   // TODO: Maintain a cache of Values that are on the pathway from an
1911   // Argument to an Instruction that would affect the liveness/return state
1912   // etc.
1913   return EnableCallSiteSpecific;
1914 }
1915 
1916 bool Attributor::checkForAllReturnedValues(function_ref<bool(Value &)> Pred,
1917                                            const AbstractAttribute &QueryingAA,
1918                                            AA::ValueScope S,
1919                                            bool RecurseForSelectAndPHI) {
1920 
1921   const IRPosition &IRP = QueryingAA.getIRPosition();
1922   const Function *AssociatedFunction = IRP.getAssociatedFunction();
1923   if (!AssociatedFunction)
1924     return false;
1925 
1926   bool UsedAssumedInformation = false;
1927   SmallVector<AA::ValueAndContext> Values;
1928   if (!getAssumedSimplifiedValues(
1929           IRPosition::returned(*AssociatedFunction), &QueryingAA, Values, S,
1930           UsedAssumedInformation, RecurseForSelectAndPHI))
1931     return false;
1932 
1933   return llvm::all_of(Values, [&](const AA::ValueAndContext &VAC) {
1934     return Pred(*VAC.getValue());
1935   });
1936 }
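
// Editor's note (illustrative sketch, not upstream code): checking a property
// of all (simplified) returned values, assuming the default scope arguments
// declared in Attributor.h; the per-value property is a placeholder:
//
//   auto RetValPred = [&](Value &RV) {
//     return !isa<UndefValue>(RV); // Hypothetical per-value property.
//   };
//   if (!A.checkForAllReturnedValues(RetValPred, *this))
//     return indicatePessimisticFixpoint();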
1937 
1938 static bool checkForAllInstructionsImpl(
1939     Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
1940     function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
1941     const AAIsDead *LivenessAA, const ArrayRef<unsigned> &Opcodes,
1942     bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
1943     bool CheckPotentiallyDead = false) {
1944   for (unsigned Opcode : Opcodes) {
1945     // Check if we have instructions with this opcode at all first.
1946     auto *Insts = OpcodeInstMap.lookup(Opcode);
1947     if (!Insts)
1948       continue;
1949 
1950     for (Instruction *I : *Insts) {
1951       // Skip dead instructions.
1952       if (A && !CheckPotentiallyDead &&
1953           A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
1954                            UsedAssumedInformation, CheckBBLivenessOnly)) {
1955         DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1956                         dbgs() << "[Attributor] Instruction " << *I
1957                                << " is potentially dead, skip!\n";);
1958         continue;
1959       }
1960 
1961       if (!Pred(*I))
1962         return false;
1963     }
1964   }
1965   return true;
1966 }
1967 
1968 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
1969                                          const Function *Fn,
1970                                          const AbstractAttribute &QueryingAA,
1971                                          const ArrayRef<unsigned> &Opcodes,
1972                                          bool &UsedAssumedInformation,
1973                                          bool CheckBBLivenessOnly,
1974                                          bool CheckPotentiallyDead) {
1975   // Since we need to provide instructions we have to have an exact definition.
1976   if (!Fn || Fn->isDeclaration())
1977     return false;
1978 
1979   const IRPosition &QueryIRP = IRPosition::function(*Fn);
1980   const auto *LivenessAA =
1981       CheckPotentiallyDead
1982           ? nullptr
1983           : (getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE));
1984 
1985   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
1986   if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, &QueryingAA,
1987                                    LivenessAA, Opcodes, UsedAssumedInformation,
1988                                    CheckBBLivenessOnly, CheckPotentiallyDead))
1989     return false;
1990 
1991   return true;
1992 }
1993 
1994 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
1995                                          const AbstractAttribute &QueryingAA,
1996                                          const ArrayRef<unsigned> &Opcodes,
1997                                          bool &UsedAssumedInformation,
1998                                          bool CheckBBLivenessOnly,
1999                                          bool CheckPotentiallyDead) {
2000   const IRPosition &IRP = QueryingAA.getIRPosition();
2001   const Function *AssociatedFunction = IRP.getAssociatedFunction();
2002   return checkForAllInstructions(Pred, AssociatedFunction, QueryingAA, Opcodes,
2003                                  UsedAssumedInformation, CheckBBLivenessOnly,
2004                                  CheckPotentiallyDead);
2005 }
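
// Editor's note (illustrative sketch, not upstream code): visiting all
// instructions with the given opcodes in the associated function; the
// implementation above filters out instructions that are assumed dead:
//
//   auto InstPred = [&](Instruction &I) {
//     return cast<CallBase>(I).hasFnAttr(Attribute::NoUnwind); // Hypothetical.
//   };
//   bool UsedAssumedInformation = false;
//   if (!A.checkForAllInstructions(InstPred, *this,
//                                  {(unsigned)Instruction::Call,
//                                   (unsigned)Instruction::Invoke},
//                                  UsedAssumedInformation))
//     return indicatePessimisticFixpoint();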
2006 
2007 bool Attributor::checkForAllReadWriteInstructions(
2008     function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
2009     bool &UsedAssumedInformation) {
2010   TimeTraceScope TS("checkForAllReadWriteInstructions");
2011 
2012   const Function *AssociatedFunction =
2013       QueryingAA.getIRPosition().getAssociatedFunction();
2014   if (!AssociatedFunction)
2015     return false;
2016 
2017   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
2018   const auto *LivenessAA =
2019       getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
2020 
2021   for (Instruction *I :
2022        InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
2023     // Skip dead instructions.
2024     if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, LivenessAA,
2025                       UsedAssumedInformation))
2026       continue;
2027 
2028     if (!Pred(*I))
2029       return false;
2030   }
2031 
2032   return true;
2033 }
2034 
2035 void Attributor::runTillFixpoint() {
2036   TimeTraceScope TimeScope("Attributor::runTillFixpoint");
2037   LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
2038                     << DG.SyntheticRoot.Deps.size()
2039                     << " abstract attributes.\n");
2040 
2041   // Now that all abstract attributes are collected and initialized we start
2042   // the abstract analysis.
2043 
2044   unsigned IterationCounter = 1;
2045   unsigned MaxIterations =
2046       Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
2047 
2048   SmallVector<AbstractAttribute *, 32> ChangedAAs;
2049   SetVector<AbstractAttribute *> Worklist, InvalidAAs;
2050   Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
2051 
2052   do {
2053     // Remember the size to determine new attributes.
2054     size_t NumAAs = DG.SyntheticRoot.Deps.size();
2055     LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
2056                       << ", Worklist size: " << Worklist.size() << "\n");
2057 
2058     // For invalid AAs we can fix dependent AAs that have a required dependence,
2059     // thereby folding long dependence chains in a single step without the need
2060     // to run updates.
2061     for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
2062       AbstractAttribute *InvalidAA = InvalidAAs[u];
2063 
2064       // Check the dependences to fast track invalidation.
2065       DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2066                       dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
2067                              << " has " << InvalidAA->Deps.size()
2068                              << " required & optional dependences\n");
2069       for (auto &DepIt : InvalidAA->Deps) {
2070         AbstractAttribute *DepAA = cast<AbstractAttribute>(DepIt.getPointer());
2071         if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
2072           DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2073                           dbgs() << " - recompute: " << *DepAA);
2074           Worklist.insert(DepAA);
2075           continue;
2076         }
2077         DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, dbgs()
2078                                                 << " - invalidate: " << *DepAA);
2079         DepAA->getState().indicatePessimisticFixpoint();
2080         assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
2081         if (!DepAA->getState().isValidState())
2082           InvalidAAs.insert(DepAA);
2083         else
2084           ChangedAAs.push_back(DepAA);
2085       }
2086       InvalidAA->Deps.clear();
2087     }
2088 
2089     // Add all abstract attributes that are potentially dependent on one that
2090     // changed to the work list.
2091     for (AbstractAttribute *ChangedAA : ChangedAAs) {
2092       for (auto &DepIt : ChangedAA->Deps)
2093         Worklist.insert(cast<AbstractAttribute>(DepIt.getPointer()));
2094       ChangedAA->Deps.clear();
2095     }
2096 
2097     LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2098                       << ", Worklist+Dependent size: " << Worklist.size()
2099                       << "\n");
2100 
2101     // Reset the changed and invalid set.
2102     ChangedAAs.clear();
2103     InvalidAAs.clear();
2104 
2105     // Update all abstract attributes in the work list and record the ones
2106     // that changed.
2107     for (AbstractAttribute *AA : Worklist) {
2108       const auto &AAState = AA->getState();
2109       if (!AAState.isAtFixpoint())
2110         if (updateAA(*AA) == ChangeStatus::CHANGED)
2111           ChangedAAs.push_back(AA);
2112 
2113       // Use the InvalidAAs vector to propagate invalid states transitively
2114       // without requiring updates.
2115       if (!AAState.isValidState())
2116         InvalidAAs.insert(AA);
2117     }
2118 
2119     // Add attributes to the changed set if they have been created in the last
2120     // iteration.
2121     ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
2122                       DG.SyntheticRoot.end());
2123 
2124     // Reset the work list and repopulate with the changed abstract attributes.
2125     // Note that dependent ones are added above.
2126     Worklist.clear();
2127     Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
2128     Worklist.insert(QueryAAsAwaitingUpdate.begin(),
2129                     QueryAAsAwaitingUpdate.end());
2130     QueryAAsAwaitingUpdate.clear();
2131 
2132   } while (!Worklist.empty() && (IterationCounter++ < MaxIterations));
2133 
2134   if (IterationCounter > MaxIterations && !Functions.empty()) {
2135     auto Remark = [&](OptimizationRemarkMissed ORM) {
2136       return ORM << "Attributor did not reach a fixpoint after "
2137                  << ore::NV("Iterations", MaxIterations) << " iterations.";
2138     };
2139     Function *F = Functions.front();
2140     emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
2141   }
2142 
2143   LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2144                     << IterationCounter << "/" << MaxIterations
2145                     << " iterations\n");
2146 
2147   // Reset abstract attributes not settled in a sound fixpoint by now. This
2148   // happens when we stopped the fixpoint iteration early. Note that only the
2149   // ones marked as "changed" *and* the ones transitively depending on them
2150   // need to be reverted to a pessimistic state. Others might not be in a
2151   // fixpoint state but we can use the optimistic results for them anyway.
2152   SmallPtrSet<AbstractAttribute *, 32> Visited;
2153   for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2154     AbstractAttribute *ChangedAA = ChangedAAs[u];
2155     if (!Visited.insert(ChangedAA).second)
2156       continue;
2157 
2158     AbstractState &State = ChangedAA->getState();
2159     if (!State.isAtFixpoint()) {
2160       State.indicatePessimisticFixpoint();
2161 
2162       NumAttributesTimedOut++;
2163     }
2164 
2165     for (auto &DepIt : ChangedAA->Deps)
2166       ChangedAAs.push_back(cast<AbstractAttribute>(DepIt.getPointer()));
2167     ChangedAA->Deps.clear();
2168   }
2169 
2170   LLVM_DEBUG({
2171     if (!Visited.empty())
2172       dbgs() << "\n[Attributor] Finalized " << Visited.size()
2173              << " abstract attributes.\n";
2174   });
2175 }
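
// Editor's note (illustrative, not upstream code): the dependences consumed
// by the loop above are recorded when one AA queries another. A REQUIRED
// dependence allows an AA that becomes invalid to invalidate its dependers
// without further updates, while an OPTIONAL dependence merely reschedules
// them, e.g.:
//
//   const auto *NoRecurseAA = A.getAAFor<AANoRecurse>(
//       *this, IRPosition::function(F), DepClassTy::REQUIRED);
//   // If NoRecurseAA is later invalidated, *this is fast-tracked to a
//   // pessimistic fixpoint by the InvalidAAs handling above.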
2176 
2177 void Attributor::registerForUpdate(AbstractAttribute &AA) {
2178   assert(AA.isQueryAA() &&
2179          "Non-query AAs should not be required to register for updates!");
2180   QueryAAsAwaitingUpdate.insert(&AA);
2181 }
2182 
2183 ChangeStatus Attributor::manifestAttributes() {
2184   TimeTraceScope TimeScope("Attributor::manifestAttributes");
2185   size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2186 
2187   unsigned NumManifested = 0;
2188   unsigned NumAtFixpoint = 0;
2189   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2190   for (auto &DepAA : DG.SyntheticRoot.Deps) {
2191     AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
2192     AbstractState &State = AA->getState();
2193 
2194     // If a fixpoint has not been reached already, we can now take the
2195     // optimistic state. This is correct because we enforced a pessimistic one
2196     // on abstract attributes that were transitively dependent on a changed one
2197     // already above.
2198     if (!State.isAtFixpoint())
2199       State.indicateOptimisticFixpoint();
2200 
2201     // We must not manifest attributes that use CallBase context info.
2202     if (AA->hasCallBaseContext())
2203       continue;
2204     // If the state is invalid, we do not try to manifest it.
2205     if (!State.isValidState())
2206       continue;
2207 
2208     if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
2209       continue;
2210 
2211     // Skip dead code.
2212     bool UsedAssumedInformation = false;
2213     if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
2214                       /* CheckBBLivenessOnly */ true))
2215       continue;
2216     // Check the manifest debug counter that allows skipping the
2217     // manifestation of AAs.
2218     if (!DebugCounter::shouldExecute(ManifestDBGCounter))
2219       continue;
2220     // Manifest the state and record if we changed the IR.
2221     ChangeStatus LocalChange = AA->manifest(*this);
2222     if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2223       AA->trackStatistics();
2224     LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2225                       << "\n");
2226 
2227     ManifestChange = ManifestChange | LocalChange;
2228 
2229     NumAtFixpoint++;
2230     NumManifested += (LocalChange == ChangeStatus::CHANGED);
2231   }
2232 
2233   (void)NumManifested;
2234   (void)NumAtFixpoint;
2235   LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2236                     << " abstract attributes while " << NumAtFixpoint
2237                     << " were in a valid fixpoint state\n");
2238 
2239   NumAttributesManifested += NumManifested;
2240   NumAttributesValidFixpoint += NumAtFixpoint;
2241 
2242   (void)NumFinalAAs;
2243   if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
2244     auto DepIt = DG.SyntheticRoot.Deps.begin();
2245     for (unsigned u = 0; u < NumFinalAAs; ++u)
2246       ++DepIt;
2247     for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
2248          ++u, ++DepIt) {
2249       errs() << "Unexpected abstract attribute: "
2250              << cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
2251              << cast<AbstractAttribute>(DepIt->getPointer())
2252                     ->getIRPosition()
2253                     .getAssociatedValue()
2254              << "\n";
2255     }
2256     llvm_unreachable("Expected the final number of abstract attributes to "
2257                      "remain unchanged!");
2258   }
2259 
2260   for (auto &It : AttrsMap) {
2261     AttributeList &AL = It.getSecond();
2262     const IRPosition &IRP =
2263         isa<Function>(It.getFirst())
2264             ? IRPosition::function(*cast<Function>(It.getFirst()))
2265             : IRPosition::callsite_function(*cast<CallBase>(It.getFirst()));
2266     IRP.setAttrList(AL);
2267   }
2268 
2269   return ManifestChange;
2270 }
2271 
2272 void Attributor::identifyDeadInternalFunctions() {
2273   // Early exit if we don't intend to delete functions.
2274   if (!Configuration.DeleteFns)
2275     return;
2276 
2277   // To avoid triggering an assertion in the lazy call graph we will not delete
2278   // any internal library functions. We should modify the assertion though and
2279   // allow internals to be deleted.
2280   const auto *TLI =
2281       isModulePass()
2282           ? nullptr
2283           : getInfoCache().getTargetLibraryInfoForFunction(*Functions.back());
2284   LibFunc LF;
2285 
2286   // Identify dead internal functions and delete them. This happens outside
2287   // the other fixpoint analysis as we might treat potentially dead functions
2288   // as live to lower the number of iterations. If they happen to be dead, the
2289   // below fixpoint loop will identify and eliminate them.
2290 
2291   SmallVector<Function *, 8> InternalFns;
2292   for (Function *F : Functions)
2293     if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
2294       InternalFns.push_back(F);
2295 
2296   SmallPtrSet<Function *, 8> LiveInternalFns;
2297   bool FoundLiveInternal = true;
2298   while (FoundLiveInternal) {
2299     FoundLiveInternal = false;
2300     for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
2301       Function *F = InternalFns[u];
2302       if (!F)
2303         continue;
2304 
2305       bool UsedAssumedInformation = false;
2306       if (checkForAllCallSites(
2307               [&](AbstractCallSite ACS) {
2308                 Function *Callee = ACS.getInstruction()->getFunction();
2309                 return ToBeDeletedFunctions.count(Callee) ||
2310                        (Functions.count(Callee) && Callee->hasLocalLinkage() &&
2311                         !LiveInternalFns.count(Callee));
2312               },
2313               *F, true, nullptr, UsedAssumedInformation)) {
2314         continue;
2315       }
2316 
2317       LiveInternalFns.insert(F);
2318       InternalFns[u] = nullptr;
2319       FoundLiveInternal = true;
2320     }
2321   }
2322 
2323   for (unsigned u = 0, e = InternalFns.size(); u < e; ++u)
2324     if (Function *F = InternalFns[u])
2325       ToBeDeletedFunctions.insert(F);
2326 }
2327 
2328 ChangeStatus Attributor::cleanupIR() {
2329   TimeTraceScope TimeScope("Attributor::cleanupIR");
2330   // Delete stuff at the end to avoid invalid references and keep a nice order.
2331   LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2332                     << ToBeDeletedFunctions.size() << " functions and "
2333                     << ToBeDeletedBlocks.size() << " blocks and "
2334                     << ToBeDeletedInsts.size() << " instructions and "
2335                     << ToBeChangedValues.size() << " values and "
2336                     << ToBeChangedUses.size() << " uses. To insert "
2337                     << ToBeChangedToUnreachableInsts.size()
2338                     << " unreachables.\n"
2339                     << "Preserve manifest added " << ManifestAddedBlocks.size()
2340                     << " blocks\n");
2341 
2342   SmallVector<WeakTrackingVH, 32> DeadInsts;
2343   SmallVector<Instruction *, 32> TerminatorsToFold;
2344 
2345   auto ReplaceUse = [&](Use *U, Value *NewV) {
2346     Value *OldV = U->get();
2347 
2348     // If we plan to replace NewV we need to update it at this point.
2349     do {
2350       const auto &Entry = ToBeChangedValues.lookup(NewV);
2351       if (!get<0>(Entry))
2352         break;
2353       NewV = get<0>(Entry);
2354     } while (true);
2355 
2356     Instruction *I = dyn_cast<Instruction>(U->getUser());
2357     assert((!I || isRunOn(*I->getFunction())) &&
2358            "Cannot replace an instruction outside the current SCC!");
2359 
2360     // Do not replace uses in returns if the value is a must-tail call we will
2361     // not delete.
2362     if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2363       if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2364         if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2365           return;
2366       // If we rewrite a return and the new value is not an argument, strip the
2367       // `returned` attribute as it is wrong now.
2368       if (!isa<Argument>(NewV))
2369         for (auto &Arg : RI->getFunction()->args())
2370           Arg.removeAttr(Attribute::Returned);
2371     }
2372 
2373     LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2374                       << " instead of " << *OldV << "\n");
2375     U->set(NewV);
2376 
2377     if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2378       CGModifiedFunctions.insert(I->getFunction());
2379       if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2380           isInstructionTriviallyDead(I))
2381         DeadInsts.push_back(I);
2382     }
2383     if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2384       auto *CB = cast<CallBase>(U->getUser());
2385       if (CB->isArgOperand(U)) {
2386         unsigned Idx = CB->getArgOperandNo(U);
2387         CB->removeParamAttr(Idx, Attribute::NoUndef);
2388         auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
2389         if (Callee && Callee->arg_size() > Idx)
2390           Callee->removeParamAttr(Idx, Attribute::NoUndef);
2391       }
2392     }
2393     if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
2394       Instruction *UserI = cast<Instruction>(U->getUser());
2395       if (isa<UndefValue>(NewV)) {
2396         ToBeChangedToUnreachableInsts.insert(UserI);
2397       } else {
2398         TerminatorsToFold.push_back(UserI);
2399       }
2400     }
2401   };
2402 
2403   for (auto &It : ToBeChangedUses) {
2404     Use *U = It.first;
2405     Value *NewV = It.second;
2406     ReplaceUse(U, NewV);
2407   }
2408 
2409   SmallVector<Use *, 4> Uses;
2410   for (auto &It : ToBeChangedValues) {
2411     Value *OldV = It.first;
2412     auto [NewV, Done] = It.second;
2413     Uses.clear();
2414     for (auto &U : OldV->uses())
2415       if (Done || !U.getUser()->isDroppable())
2416         Uses.push_back(&U);
2417     for (Use *U : Uses) {
2418       if (auto *I = dyn_cast<Instruction>(U->getUser()))
2419         if (!isRunOn(*I->getFunction()))
2420           continue;
2421       ReplaceUse(U, NewV);
2422     }
2423   }
2424 
2425   for (const auto &V : InvokeWithDeadSuccessor)
2426     if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2427       assert(isRunOn(*II->getFunction()) &&
2428              "Cannot replace an invoke outside the current SCC!");
2429       bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2430       bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2431       bool Invoke2CallAllowed =
2432           !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
2433       assert((UnwindBBIsDead || NormalBBIsDead) &&
2434              "Invoke does not have dead successors!");
2435       BasicBlock *BB = II->getParent();
2436       BasicBlock *NormalDestBB = II->getNormalDest();
2437       if (UnwindBBIsDead) {
2438         Instruction *NormalNextIP = &NormalDestBB->front();
2439         if (Invoke2CallAllowed) {
2440           changeToCall(II);
2441           NormalNextIP = BB->getTerminator();
2442         }
2443         if (NormalBBIsDead)
2444           ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2445       } else {
2446         assert(NormalBBIsDead && "Broken invariant!");
2447         if (!NormalDestBB->getUniquePredecessor())
2448           NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2449         ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2450       }
2451     }
2452   for (Instruction *I : TerminatorsToFold) {
2453     assert(isRunOn(*I->getFunction()) &&
2454            "Cannot replace a terminator outside the current SCC!");
2455     CGModifiedFunctions.insert(I->getFunction());
2456     ConstantFoldTerminator(I->getParent());
2457   }
2458   for (const auto &V : ToBeChangedToUnreachableInsts)
2459     if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2460       LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2461                         << "\n");
2462       assert(isRunOn(*I->getFunction()) &&
2463              "Cannot replace an instruction outside the current SCC!");
2464       CGModifiedFunctions.insert(I->getFunction());
2465       changeToUnreachable(I);
2466     }
2467 
2468   for (const auto &V : ToBeDeletedInsts) {
2469     if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2470       if (auto *CB = dyn_cast<CallBase>(I)) {
2471         assert((isa<IntrinsicInst>(CB) || isRunOn(*I->getFunction())) &&
2472                "Cannot delete an instruction outside the current SCC!");
2473         if (!isa<IntrinsicInst>(CB))
2474           Configuration.CGUpdater.removeCallSite(*CB);
2475       }
2476       I->dropDroppableUses();
2477       CGModifiedFunctions.insert(I->getFunction());
2478       if (!I->getType()->isVoidTy())
2479         I->replaceAllUsesWith(UndefValue::get(I->getType()));
2480       if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
2481         DeadInsts.push_back(I);
2482       else
2483         I->eraseFromParent();
2484     }
2485   }
2486 
2487   llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2488 
2489   LLVM_DEBUG({
2490     dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2491     for (auto &I : DeadInsts)
2492       if (I)
2493         dbgs() << "  - " << *I << "\n";
2494   });
2495 
2496   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
2497 
2498   if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2499     SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2500     ToBeDeletedBBs.reserve(NumDeadBlocks);
2501     for (BasicBlock *BB : ToBeDeletedBlocks) {
2502       assert(isRunOn(*BB->getParent()) &&
2503              "Cannot delete a block outside the current SCC!");
2504       CGModifiedFunctions.insert(BB->getParent());
2505       // Do not delete BBs added during manifests of AAs.
2506       if (ManifestAddedBlocks.contains(BB))
2507         continue;
2508       ToBeDeletedBBs.push_back(BB);
2509     }
2510     // We do not actually delete the blocks but squash them into a single
2511     // unreachable; untangling branches that jump here is something we need
2512     // to do in a more generic way.
2513     detachDeadBlocks(ToBeDeletedBBs, nullptr);
2514   }
2515 
2516   identifyDeadInternalFunctions();
2517 
2518   // Rewrite the functions as requested during manifest.
2519   ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2520 
2521   for (Function *Fn : CGModifiedFunctions)
2522     if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2523       Configuration.CGUpdater.reanalyzeFunction(*Fn);
2524 
2525   for (Function *Fn : ToBeDeletedFunctions) {
2526     if (!Functions.count(Fn))
2527       continue;
2528     Configuration.CGUpdater.removeFunction(*Fn);
2529   }
2530 
2531   if (!ToBeChangedUses.empty())
2532     ManifestChange = ChangeStatus::CHANGED;
2533 
2534   if (!ToBeChangedToUnreachableInsts.empty())
2535     ManifestChange = ChangeStatus::CHANGED;
2536 
2537   if (!ToBeDeletedFunctions.empty())
2538     ManifestChange = ChangeStatus::CHANGED;
2539 
2540   if (!ToBeDeletedBlocks.empty())
2541     ManifestChange = ChangeStatus::CHANGED;
2542 
2543   if (!ToBeDeletedInsts.empty())
2544     ManifestChange = ChangeStatus::CHANGED;
2545 
2546   if (!InvokeWithDeadSuccessor.empty())
2547     ManifestChange = ChangeStatus::CHANGED;
2548 
2549   if (!DeadInsts.empty())
2550     ManifestChange = ChangeStatus::CHANGED;
2551 
2552   NumFnDeleted += ToBeDeletedFunctions.size();
2553 
2554   LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2555                     << " functions after manifest.\n");
2556 
2557 #ifdef EXPENSIVE_CHECKS
2558   for (Function *F : Functions) {
2559     if (ToBeDeletedFunctions.count(F))
2560       continue;
2561     assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2562   }
2563 #endif
2564 
2565   return ManifestChange;
2566 }
2567 
2568 ChangeStatus Attributor::run() {
2569   TimeTraceScope TimeScope("Attributor::run");
2570   AttributorCallGraph ACallGraph(*this);
2571 
2572   if (PrintCallGraph)
2573     ACallGraph.populateAll();
2574 
2575   Phase = AttributorPhase::UPDATE;
2576   runTillFixpoint();
2577 
2578   // Dump, view, or print the dependence graph on demand.
2579   if (DumpDepGraph)
2580     DG.dumpGraph();
2581 
2582   if (ViewDepGraph)
2583     DG.viewGraph();
2584 
2585   if (PrintDependencies)
2586     DG.print();
2587 
2588   Phase = AttributorPhase::MANIFEST;
2589   ChangeStatus ManifestChange = manifestAttributes();
2590 
2591   Phase = AttributorPhase::CLEANUP;
2592   ChangeStatus CleanupChange = cleanupIR();
2593 
2594   if (PrintCallGraph)
2595     ACallGraph.print();
2596 
2597   return ManifestChange | CleanupChange;
2598 }
2599 
2600 ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2601   TimeTraceScope TimeScope("updateAA", [&]() {
2602     return AA.getName() + std::to_string(AA.getIRPosition().getPositionKind());
2603   });
2604   assert(Phase == AttributorPhase::UPDATE &&
2605          "We can update AA only in the update stage!");
2606 
2607   // Use a new dependence vector for this update.
2608   DependenceVector DV;
2609   DependenceStack.push_back(&DV);
2610 
2611   auto &AAState = AA.getState();
2612   ChangeStatus CS = ChangeStatus::UNCHANGED;
2613   bool UsedAssumedInformation = false;
2614   if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2615                      /* CheckBBLivenessOnly */ true))
2616     CS = AA.update(*this);
2617 
2618   if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2619     // If the AA did not rely on outside information but changed, we run it
2620     // again to see if it found a fixpoint. Most AAs do but we don't require
2621     // them to. Hence, it might take the AA multiple iterations to get to a
2622     // fixpoint even if it does not rely on outside information, which is fine.
2623     ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
2624     if (CS == ChangeStatus::CHANGED)
2625       RerunCS = AA.update(*this);
2626 
2627     // If the attribute did not change during the run or rerun, and it still did
2628     // not query any non-fix information, the state will not change and we can
2629     // indicate that right at this point.
2630     if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2631       AAState.indicateOptimisticFixpoint();
2632   }
2633 
2634   if (!AAState.isAtFixpoint())
2635     rememberDependences();
2636 
2637   // Verify the stack was used properly, that is we pop the dependence vector we
2638   // put there earlier.
2639   DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2640   (void)PoppedDV;
2641   assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2642 
2643   return CS;
2644 }
2645 
2646 void Attributor::createShallowWrapper(Function &F) {
2647   assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2648 
2649   Module &M = *F.getParent();
2650   LLVMContext &Ctx = M.getContext();
2651   FunctionType *FnTy = F.getFunctionType();
2652 
2653   Function *Wrapper =
2654       Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2655   F.setName(""); // Make the wrapped function anonymous.
2656   M.getFunctionList().insert(F.getIterator(), Wrapper);
2657 
2658   F.setLinkage(GlobalValue::InternalLinkage);
2659 
2660   F.replaceAllUsesWith(Wrapper);
2661   assert(F.use_empty() && "Uses remained after wrapper was created!");
2662 
2663   // Move the COMDAT section to the wrapper.
2664   // TODO: Check if we need to keep it for F as well.
2665   Wrapper->setComdat(F.getComdat());
2666   F.setComdat(nullptr);
2667 
2668   // Copy all metadata and attributes but keep them on F as well.
2669   SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2670   F.getAllMetadata(MDs);
2671   for (auto MDIt : MDs)
2672     Wrapper->addMetadata(MDIt.first, *MDIt.second);
2673   Wrapper->setAttributes(F.getAttributes());
2674 
2675   // Create the call in the wrapper.
2676   BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2677 
2678   SmallVector<Value *, 8> Args;
2679   Argument *FArgIt = F.arg_begin();
2680   for (Argument &Arg : Wrapper->args()) {
2681     Args.push_back(&Arg);
2682     Arg.setName((FArgIt++)->getName());
2683   }
2684 
2685   CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2686   CI->setTailCall(true);
2687   CI->addFnAttr(Attribute::NoInline);
2688   ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2689 
2690   NumFnShallowWrappersCreated++;
2691 }
2692 
2693 bool Attributor::isInternalizable(Function &F) {
2694   if (F.isDeclaration() || F.hasLocalLinkage() ||
2695       GlobalValue::isInterposableLinkage(F.getLinkage()))
2696     return false;
2697   return true;
2698 }
2699 
2700 Function *Attributor::internalizeFunction(Function &F, bool Force) {
2701   if (!AllowDeepWrapper && !Force)
2702     return nullptr;
2703   if (!isInternalizable(F))
2704     return nullptr;
2705 
2706   SmallPtrSet<Function *, 2> FnSet = {&F};
2707   DenseMap<Function *, Function *> InternalizedFns;
2708   internalizeFunctions(FnSet, InternalizedFns);
2709 
2710   return InternalizedFns[&F];
2711 }
2712 
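// Internalize a group of functions in one go. A usage sketch, assuming the
// static interface declared in Attributor.h (names are made up):
//
//   SmallPtrSet<Function *, 2> FnSet = {F0, F1};
//   DenseMap<Function *, Function *> CopyMap;
//   if (Attributor::internalizeFunctions(FnSet, CopyMap))
//     Function *PrivF0 = CopyMap[F0]; // "<name>.internalized", private linkage
//
// All copies are created before any uses are rewired so that calls between
// members of the set can be handled consistently (see IsNotInternalized
// below).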
2713 bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2714                                       DenseMap<Function *, Function *> &FnMap) {
2715   for (Function *F : FnSet)
2716     if (!Attributor::isInternalizable(*F))
2717       return false;
2718 
2719   FnMap.clear();
2720   // Generate the internalized version of each function.
2721   for (Function *F : FnSet) {
2722     Module &M = *F->getParent();
2723     FunctionType *FnTy = F->getFunctionType();
2724 
2725     // Create a copy of the current function
2726     Function *Copied =
2727         Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2728                          F->getName() + ".internalized");
2729     ValueToValueMapTy VMap;
2730     auto *NewFArgIt = Copied->arg_begin();
2731     for (auto &Arg : F->args()) {
2732       auto ArgName = Arg.getName();
2733       NewFArgIt->setName(ArgName);
2734       VMap[&Arg] = &(*NewFArgIt++);
2735     }
2736     SmallVector<ReturnInst *, 8> Returns;
2737 
2738     // Copy the body of the original function to the new one
2739     CloneFunctionInto(Copied, F, VMap,
2740                       CloneFunctionChangeType::LocalChangesOnly, Returns);
2741 
2742     // Set the linkage and visibility late as CloneFunctionInto has some
2743     // implicit requirements.
2744     Copied->setVisibility(GlobalValue::DefaultVisibility);
2745     Copied->setLinkage(GlobalValue::PrivateLinkage);
2746 
2747     // Copy metadata
2748     SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2749     F->getAllMetadata(MDs);
2750     for (auto MDIt : MDs)
2751       if (!Copied->hasMetadata())
2752         Copied->addMetadata(MDIt.first, *MDIt.second);
2753 
2754     M.getFunctionList().insert(F->getIterator(), Copied);
2755     Copied->setDSOLocal(true);
2756     FnMap[F] = Copied;
2757   }
2758 
2759   // Replace all uses of the old function with the new internalized function
2760   // unless the caller is a function that was just internalized.
2761   for (Function *F : FnSet) {
2762     auto &InternalizedFn = FnMap[F];
2763     auto IsNotInternalized = [&](Use &U) -> bool {
2764       if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2765         return !FnMap.lookup(CB->getCaller());
2766       return false;
2767     };
2768     F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2769   }
2770 
2771   return true;
2772 }
2773 
2774 bool Attributor::isValidFunctionSignatureRewrite(
2775     Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2776 
2777   if (!Configuration.RewriteSignatures)
2778     return false;
2779 
2780   Function *Fn = Arg.getParent();
2781   auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2782     // Forbid call sites that cast the function return type. If we needed to
2783     // rewrite such functions we would have to re-create a cast for the new
2784     // call site (if the old one had uses).
2785     if (!ACS.getCalledFunction() ||
2786         ACS.getInstruction()->getType() !=
2787             ACS.getCalledFunction()->getReturnType())
2788       return false;
2789     if (cast<CallBase>(ACS.getInstruction())->getCalledOperand()->getType() !=
2790         Fn->getType())
2791       return false;
2792     if (ACS.getNumArgOperands() != Fn->arg_size())
2793       return false;
2794     // Forbid must-tail calls for now.
2795     return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2796   };
2797 
2798   // Avoid var-arg functions for now.
2799   if (Fn->isVarArg()) {
2800     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2801     return false;
2802   }
2803 
2804   // Avoid functions with complicated argument passing semantics.
2805   AttributeList FnAttributeList = Fn->getAttributes();
2806   if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2807       FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2808       FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2809       FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2810     LLVM_DEBUG(
2811         dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2812     return false;
2813   }
2814 
2815   // Avoid callbacks for now.
2816   bool UsedAssumedInformation = false;
2817   if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2818                             UsedAssumedInformation,
2819                             /* CheckPotentiallyDead */ true)) {
2820     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2821     return false;
2822   }
2823 
2824   auto InstPred = [](Instruction &I) {
2825     if (auto *CI = dyn_cast<CallInst>(&I))
2826       return !CI->isMustTailCall();
2827     return true;
2828   };
2829 
2830   // Forbid must-tail calls for now.
2831   // TODO:
2832   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2833   if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2834                                    nullptr, {Instruction::Call},
2835                                    UsedAssumedInformation)) {
2836     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2837     return false;
2838   }
2839 
2840   return true;
2841 }
2842 
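// The main in-tree client of this machinery is argument privatization
// (AAPrivatizablePtr), which replaces a pointer argument by the value(s) it
// points to. An illustrative registration, with made-up names and the repair
// callbacks elided:
//
//   Argument &Arg = ...;                        // e.g., `ptr %s` pointing at {i32, i32}
//   Type *ReplacementTypes[] = {I32Ty, I32Ty};  // the two members
//   if (A.isValidFunctionSignatureRewrite(Arg, ReplacementTypes))
//     A.registerFunctionSignatureRewrite(Arg, ReplacementTypes,
//                                        std::move(CalleeRepairCB),
//                                        std::move(ACSRepairCB));
//
// The callbacks later patch the new callee body and every rewritten call
// site when rewriteFunctionSignatures runs.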
2843 bool Attributor::registerFunctionSignatureRewrite(
2844     Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2845     ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
2846     ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
2847   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2848                     << Arg.getParent()->getName() << " with "
2849                     << ReplacementTypes.size() << " replacements\n");
2850   assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2851          "Cannot register an invalid rewrite");
2852 
2853   Function *Fn = Arg.getParent();
2854   SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2855       ArgumentReplacementMap[Fn];
2856   if (ARIs.empty())
2857     ARIs.resize(Fn->arg_size());
2858 
2859   // If we already have a replacement with at most as many new arguments,
2860   // ignore this request.
2861   std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2862   if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2863     LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2864     return false;
2865   }
2866 
2867   // If we have a replacement already but we like the new one better, delete
2868   // the old.
2869   ARI.reset();
2870 
2871   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2872                     << Arg.getParent()->getName() << " with "
2873                     << ReplacementTypes.size() << " replacements\n");
2874 
2875   // Remember the replacement.
2876   ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2877                                         std::move(CalleeRepairCB),
2878                                         std::move(ACSRepairCB)));
2879 
2880   return true;
2881 }
2882 
2883 bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2884   bool Result = true;
2885 #ifndef NDEBUG
2886   if (SeedAllowList.size() != 0)
2887     Result = llvm::is_contained(SeedAllowList, AA.getName());
2888   Function *Fn = AA.getAnchorScope();
2889   if (FunctionSeedAllowList.size() != 0 && Fn)
2890     Result &= llvm::is_contained(FunctionSeedAllowList, Fn->getName());
2891 #endif
2892   return Result;
2893 }
2894 
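// A sketch of the net effect for a single registered rewrite that expands one
// argument into two values (illustrative IR only):
//
//   before: define void @f(ptr %p)          ...  call void @f(ptr %q)
//   after:  define void @f(i32 %a, i32 %b)  ...  call void @f(i32 %x, i32 %y)
//
// The old body is spliced into the new function, block addresses and call
// sites are rewired, and the emptied old function is left behind (see the
// "rotting hulk" comment below).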
2895 ChangeStatus Attributor::rewriteFunctionSignatures(
2896     SmallSetVector<Function *, 8> &ModifiedFns) {
2897   ChangeStatus Changed = ChangeStatus::UNCHANGED;
2898 
2899   for (auto &It : ArgumentReplacementMap) {
2900     Function *OldFn = It.getFirst();
2901 
2902     // Deleted functions do not require rewrites.
2903     if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
2904       continue;
2905 
2906     const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2907         It.getSecond();
2908     assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
2909 
2910     SmallVector<Type *, 16> NewArgumentTypes;
2911     SmallVector<AttributeSet, 16> NewArgumentAttributes;
2912 
2913     // Collect replacement argument types and copy over existing attributes.
2914     AttributeList OldFnAttributeList = OldFn->getAttributes();
2915     for (Argument &Arg : OldFn->args()) {
2916       if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2917               ARIs[Arg.getArgNo()]) {
2918         NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
2919                                 ARI->ReplacementTypes.end());
2920         NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
2921                                      AttributeSet());
2922       } else {
2923         NewArgumentTypes.push_back(Arg.getType());
2924         NewArgumentAttributes.push_back(
2925             OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
2926       }
2927     }
2928 
2929     uint64_t LargestVectorWidth = 0;
2930     for (auto *I : NewArgumentTypes)
2931       if (auto *VT = dyn_cast<llvm::VectorType>(I))
2932         LargestVectorWidth =
2933             std::max(LargestVectorWidth,
2934                      VT->getPrimitiveSizeInBits().getKnownMinValue());
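    // LargestVectorWidth is used below to keep the "min-legal-vector-width"
    // attribute of the new function, and of every caller of a rewritten call
    // site, in sync with the widest vector type in the new signature.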
2935 
2936     FunctionType *OldFnTy = OldFn->getFunctionType();
2937     Type *RetTy = OldFnTy->getReturnType();
2938 
2939     // Construct the new function type using the new argument types.
2940     FunctionType *NewFnTy =
2941         FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
2942 
2943     LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
2944                       << "' from " << *OldFn->getFunctionType() << " to "
2945                       << *NewFnTy << "\n");
2946 
2947     // Create the new function body and insert it into the module.
2948     Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
2949                                        OldFn->getAddressSpace(), "");
2950     Functions.insert(NewFn);
2951     OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
2952     NewFn->takeName(OldFn);
2953     NewFn->copyAttributesFrom(OldFn);
2954 
2955     // Patch the pointer to the LLVM function in the debug info descriptor.
2956     NewFn->setSubprogram(OldFn->getSubprogram());
2957     OldFn->setSubprogram(nullptr);
2958 
2959     // Recompute the parameter attributes list based on the new arguments for
2960     // the function.
2961     LLVMContext &Ctx = OldFn->getContext();
2962     NewFn->setAttributes(AttributeList::get(
2963         Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
2964         NewArgumentAttributes));
2965     AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
2966 
2967     // Since we have now created the new function, splice the body of the old
2968     // function right into the new function, leaving the old rotting hulk of the
2969     // function empty.
2970     NewFn->splice(NewFn->begin(), OldFn);
2971 
2972     // Fixup block addresses to reference new function.
2973     SmallVector<BlockAddress *, 8u> BlockAddresses;
2974     for (User *U : OldFn->users())
2975       if (auto *BA = dyn_cast<BlockAddress>(U))
2976         BlockAddresses.push_back(BA);
2977     for (auto *BA : BlockAddresses)
2978       BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));
2979 
2980     // Set of all "call-like" instructions that invoke the old function mapped
2981     // to their new replacements.
2982     SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;
2983 
2984     // Callback to create a new "call-like" instruction for a given one.
2985     auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
2986       CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
2987       const AttributeList &OldCallAttributeList = OldCB->getAttributes();
2988 
2989       // Collect the new argument operands for the replacement call site.
2990       SmallVector<Value *, 16> NewArgOperands;
2991       SmallVector<AttributeSet, 16> NewArgOperandAttributes;
2992       for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
2993         unsigned NewFirstArgNum = NewArgOperands.size();
2994         (void)NewFirstArgNum; // only used inside assert.
2995         if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2996                 ARIs[OldArgNum]) {
2997           if (ARI->ACSRepairCB)
2998             ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
2999           assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
3000                      NewArgOperands.size() &&
3001                  "ACS repair callback did not provide as many operand as new "
3002                  "types were registered!");
3003           // TODO: Expose the attribute set to the ACS repair callback
3004           NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
3005                                          AttributeSet());
3006         } else {
3007           NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
3008           NewArgOperandAttributes.push_back(
3009               OldCallAttributeList.getParamAttrs(OldArgNum));
3010         }
3011       }
3012 
3013       assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
3014              "Mismatch # argument operands vs. # argument operand attributes!");
3015       assert(NewArgOperands.size() == NewFn->arg_size() &&
3016              "Mismatch # argument operands vs. # function arguments!");
3017 
3018       SmallVector<OperandBundleDef, 4> OperandBundleDefs;
3019       OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
3020 
3021       // Create a new call or invoke instruction to replace the old one.
3022       CallBase *NewCB;
3023       if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
3024         NewCB =
3025             InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
3026                                NewArgOperands, OperandBundleDefs, "", OldCB);
3027       } else {
3028         auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
3029                                        "", OldCB);
3030         NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
3031         NewCB = NewCI;
3032       }
3033 
3034       // Copy over various properties and the new attributes.
3035       NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
3036       NewCB->setCallingConv(OldCB->getCallingConv());
3037       NewCB->takeName(OldCB);
3038       NewCB->setAttributes(AttributeList::get(
3039           Ctx, OldCallAttributeList.getFnAttrs(),
3040           OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
3041 
3042       AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
3043                                                     LargestVectorWidth);
3044 
3045       CallSitePairs.push_back({OldCB, NewCB});
3046       return true;
3047     };
3048 
3049     // Use the CallSiteReplacementCreator to create replacement call sites.
3050     bool UsedAssumedInformation = false;
3051     bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
3052                                         true, nullptr, UsedAssumedInformation,
3053                                         /* CheckPotentiallyDead */ true);
3054     (void)Success;
3055     assert(Success && "Assumed call site replacement to succeed!");
3056 
3057     // Rewire the arguments.
3058     Argument *OldFnArgIt = OldFn->arg_begin();
3059     Argument *NewFnArgIt = NewFn->arg_begin();
3060     for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
3061          ++OldArgNum, ++OldFnArgIt) {
3062       if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3063               ARIs[OldArgNum]) {
3064         if (ARI->CalleeRepairCB)
3065           ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
3066         if (ARI->ReplacementTypes.empty())
3067           OldFnArgIt->replaceAllUsesWith(
3068               PoisonValue::get(OldFnArgIt->getType()));
3069         NewFnArgIt += ARI->ReplacementTypes.size();
3070       } else {
3071         NewFnArgIt->takeName(&*OldFnArgIt);
3072         OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
3073         ++NewFnArgIt;
3074       }
3075     }
3076 
3077     // Eliminate the instructions *after* we visited all of them.
3078     for (auto &CallSitePair : CallSitePairs) {
3079       CallBase &OldCB = *CallSitePair.first;
3080       CallBase &NewCB = *CallSitePair.second;
3081       assert(OldCB.getType() == NewCB.getType() &&
3082              "Cannot handle call sites with different types!");
3083       ModifiedFns.insert(OldCB.getFunction());
3084       Configuration.CGUpdater.replaceCallSite(OldCB, NewCB);
3085       OldCB.replaceAllUsesWith(&NewCB);
3086       OldCB.eraseFromParent();
3087     }
3088 
3089     // Replace the function in the call graph (if any).
3090     Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
3091 
3092     // If the old function was modified and needed to be reanalyzed, the new one
3093     // does now.
3094     if (ModifiedFns.remove(OldFn))
3095       ModifiedFns.insert(NewFn);
3096 
3097     Changed = ChangeStatus::CHANGED;
3098   }
3099 
3100   return Changed;
3101 }
3102 
3103 void InformationCache::initializeInformationCache(const Function &CF,
3104                                                   FunctionInfo &FI) {
3105   // As we do not modify the function here we can remove the const
3106   // without breaking implicit assumptions. At the end of the day, we could
3107   // initialize the cache eagerly which would look the same to the users.
3108   Function &F = const_cast<Function &>(CF);
3109 
3110   // Walk all instructions to find interesting instructions that might be
3111   // queried by abstract attributes during their initialization or update.
3112   // This has to happen before we create attributes.
3113 
3114   DenseMap<const Value *, std::optional<short>> AssumeUsesMap;
3115 
3116   // Add \p V to the assume uses map which tracks the number of uses outside
3117   // of "visited" assumes. If no outside uses are left, the value is added to
3118   // AssumeOnlyValues.
3119   auto AddToAssumeUsesMap = [&](const Value &V) -> void {
3120     SmallVector<const Instruction *> Worklist;
3121     if (auto *I = dyn_cast<Instruction>(&V))
3122       Worklist.push_back(I);
3123     while (!Worklist.empty()) {
3124       const Instruction *I = Worklist.pop_back_val();
3125       std::optional<short> &NumUses = AssumeUsesMap[I];
3126       if (!NumUses)
3127         NumUses = I->getNumUses();
3128       NumUses = *NumUses - /* this assume */ 1;
3129       if (*NumUses != 0)
3130         continue;
3131       AssumeOnlyValues.insert(I);
3132       for (const Value *Op : I->operands())
3133         if (auto *OpI = dyn_cast<Instruction>(Op))
3134           Worklist.push_back(OpI);
3135     }
3136   };
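  // For example, given
  //   %c = icmp eq i32 %x, 0
  //   call void @llvm.assume(i1 %c)
  // the icmp's only use is the assume, so it ends up in AssumeOnlyValues and
  // the walk continues into its operands; a value with additional non-assume
  // uses keeps a positive count and is skipped.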
3137 
3138   for (Instruction &I : instructions(&F)) {
3139     bool IsInterestingOpcode = false;
3140 
3141     // To allow easy access to all instructions in a function with a given
3142     // opcode we store them in the InfoCache. As not all opcodes are interesting
3143     // to concrete attributes we only cache the ones identified as interesting
3144     // in the following switch. Note: new call base instruction kinds have to
3145     // be handled explicitly (see the assert in the default case).
3146     switch (I.getOpcode()) {
3147     default:
3148       assert(!isa<CallBase>(&I) &&
3149              "New call base instruction type needs to be known in the "
3150              "Attributor.");
3151       break;
3152     case Instruction::Call:
3153       // Calls are interesting on their own, additionally:
3154       // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
3155       // For `must-tail` calls we remember the caller and callee.
3156       if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
3157         AssumeOnlyValues.insert(Assume);
3158         fillMapFromAssume(*Assume, KnowledgeMap);
3159         AddToAssumeUsesMap(*Assume->getArgOperand(0));
3160       } else if (cast<CallInst>(I).isMustTailCall()) {
3161         FI.ContainsMustTailCall = true;
3162         if (auto *Callee = dyn_cast_if_present<Function>(
3163                 cast<CallInst>(I).getCalledOperand()))
3164           getFunctionInfo(*Callee).CalledViaMustTail = true;
3165       }
3166       [[fallthrough]];
3167     case Instruction::CallBr:
3168     case Instruction::Invoke:
3169     case Instruction::CleanupRet:
3170     case Instruction::CatchSwitch:
3171     case Instruction::AtomicRMW:
3172     case Instruction::AtomicCmpXchg:
3173     case Instruction::Br:
3174     case Instruction::Resume:
3175     case Instruction::Ret:
3176     case Instruction::Load:
3177       // The alignment of a pointer is interesting for loads.
3178     case Instruction::Store:
3179       // The alignment of a pointer is interesting for stores.
3180     case Instruction::Alloca:
3181     case Instruction::AddrSpaceCast:
3182       IsInterestingOpcode = true;
3183     }
3184     if (IsInterestingOpcode) {
3185       auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
3186       if (!Insts)
3187         Insts = new (Allocator) InstructionVectorTy();
3188       Insts->push_back(&I);
3189     }
3190     if (I.mayReadOrWriteMemory())
3191       FI.RWInsts.push_back(&I);
3192   }
3193 
3194   if (F.hasFnAttribute(Attribute::AlwaysInline) &&
3195       isInlineViable(F).isSuccess())
3196     InlineableFunctions.insert(&F);
3197 }
3198 
3199 InformationCache::FunctionInfo::~FunctionInfo() {
3200   // The instruction vectors are allocated using a BumpPtrAllocator, we need to
3201   // manually destroy them.
3202   for (auto &It : OpcodeInstMap)
3203     It.getSecond()->~InstructionVectorTy();
3204 }
3205 
3206 void Attributor::recordDependence(const AbstractAttribute &FromAA,
3207                                   const AbstractAttribute &ToAA,
3208                                   DepClassTy DepClass) {
3209   if (DepClass == DepClassTy::NONE)
3210     return;
3211   // If we are outside of an update, thus before the actual fixpoint iteration
3212   // started (= when we create AAs), we do not track dependences because we will
3213   // put all AAs into the initial worklist anyway.
3214   if (DependenceStack.empty())
3215     return;
3216   if (FromAA.getState().isAtFixpoint())
3217     return;
3218   DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
3219 }
3220 
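// Dependences are buffered per update in the vector on top of DependenceStack
// and only committed here, from updateAA(), when the just-updated attribute is
// not at a fixpoint yet; for fixed attributes the edges would be useless and
// are simply dropped. Committing an edge into FromAA's `Deps` means a later
// change of FromAA will re-enqueue ToAA in the fixpoint loop.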
3221 void Attributor::rememberDependences() {
3222   assert(!DependenceStack.empty() && "No dependences to remember!");
3223 
3224   for (DepInfo &DI : *DependenceStack.back()) {
3225     assert((DI.DepClass == DepClassTy::REQUIRED ||
3226             DI.DepClass == DepClassTy::OPTIONAL) &&
3227            "Expected required or optional dependence (1 bit)!");
3228     auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
3229     DepAAs.insert(AbstractAttribute::DepTy(
3230         const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
3231   }
3232 }
3233 
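/// Create an AA of type \p AAType for \p IRP unless the IR attribute \p AK is
/// already present or assumed. Mirroring the uses below, e.g.,
///
///   checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);
///
/// only seeds an AANoFree for the function position if `nofree` is neither in
/// \p Attrs nor assumed via AA::hasAssumedIRAttr.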
3234 template <Attribute::AttrKind AK, typename AAType>
3235 void Attributor::checkAndQueryIRAttr(const IRPosition &IRP,
3236                                      AttributeSet Attrs) {
3237   bool IsKnown;
3238   if (!Attrs.hasAttribute(AK))
3239     if (!AA::hasAssumedIRAttr<AK>(*this, nullptr, IRP, DepClassTy::NONE,
3240                                   IsKnown))
3241       getOrCreateAAFor<AAType>(IRP);
3242 }
3243 
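/// Seeding example: for an IPO-amendable `define ptr @f(ptr %p)` the code
/// below creates, among others, AAIsDead and AAUndefinedBehavior for @f,
/// memory behavior/location AAs for the function, and
/// nonnull/noalias/align/dereferenceable AAs for both %p and the returned
/// pointer. Non-amendable functions only receive the intraprocedural subset.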
3244 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
3245   if (!VisitedFunctions.insert(&F).second)
3246     return;
3247   if (F.isDeclaration())
3248     return;
3249 
3250   // In non-module runs we need to look at the call sites of a function to
3251   // determine if it is part of a must-tail call edge. This will influence what
3252   // attributes we can derive.
3253   InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
3254   if (!isModulePass() && !FI.CalledViaMustTail) {
3255     for (const Use &U : F.uses())
3256       if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
3257         if (CB->isCallee(&U) && CB->isMustTailCall())
3258           FI.CalledViaMustTail = true;
3259   }
3260 
3261   IRPosition FPos = IRPosition::function(F);
3262   bool IsIPOAmendable = isFunctionIPOAmendable(F);
3263   auto Attrs = F.getAttributes();
3264   auto FnAttrs = Attrs.getFnAttrs();
3265 
3266   // Check for dead BasicBlocks in every function.
3267   // We need dead instruction detection because we do not want to deal with
3268   // broken IR in which SSA rules do not apply.
3269   getOrCreateAAFor<AAIsDead>(FPos);
3270 
3271   // Every function might contain instructions that cause "undefined
3272   // behavior".
3273   getOrCreateAAFor<AAUndefinedBehavior>(FPos);
3274 
3275   // Every function might be applicable for Heap-To-Stack conversion.
3276   if (EnableHeapToStack)
3277     getOrCreateAAFor<AAHeapToStack>(FPos);
3278 
3279   // Every function might be "must-progress".
3280   checkAndQueryIRAttr<Attribute::MustProgress, AAMustProgress>(FPos, FnAttrs);
3281 
3282   // Every function might be "no-free".
3283   checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);
3284 
3285   // Every function might be "will-return".
3286   checkAndQueryIRAttr<Attribute::WillReturn, AAWillReturn>(FPos, FnAttrs);
3287 
3288   // Everything that is visible from the outside (= function, argument, and
3289   // return positions) cannot be changed if the function is not IPO amendable.
3290   // We can, however, analyze the code inside.
3291   if (IsIPOAmendable) {
3292 
3293     // Every function can be nounwind.
3294     checkAndQueryIRAttr<Attribute::NoUnwind, AANoUnwind>(FPos, FnAttrs);
3295 
3296     // Every function might be marked "nosync"
3297     checkAndQueryIRAttr<Attribute::NoSync, AANoSync>(FPos, FnAttrs);
3298 
3299     // Every function might be "no-return".
3300     checkAndQueryIRAttr<Attribute::NoReturn, AANoReturn>(FPos, FnAttrs);
3301 
3302     // Every function might be "no-recurse".
3303     checkAndQueryIRAttr<Attribute::NoRecurse, AANoRecurse>(FPos, FnAttrs);
3304 
3305     // Every function can be "non-convergent".
3306     if (Attrs.hasFnAttr(Attribute::Convergent))
3307       getOrCreateAAFor<AANonConvergent>(FPos);
3308 
3309     // Every function might be "readnone/readonly/writeonly/...".
3310     getOrCreateAAFor<AAMemoryBehavior>(FPos);
3311 
3312     // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
3313     getOrCreateAAFor<AAMemoryLocation>(FPos);
3314 
3315     // Every function can track active assumptions.
3316     getOrCreateAAFor<AAAssumptionInfo>(FPos);
3317 
3318     // Return attributes are only appropriate if the return type is non-void.
3319     Type *ReturnType = F.getReturnType();
3320     if (!ReturnType->isVoidTy()) {
3321       IRPosition RetPos = IRPosition::returned(F);
3322       AttributeSet RetAttrs = Attrs.getRetAttrs();
3323 
3324       // Every returned value might be dead.
3325       getOrCreateAAFor<AAIsDead>(RetPos);
3326 
3327       // Every function might be simplified.
3328       bool UsedAssumedInformation = false;
3329       getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
3330                            AA::Intraprocedural);
3331 
3332       // Every returned value might be marked noundef.
3333       checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(RetPos, RetAttrs);
3334 
3335       if (ReturnType->isPointerTy()) {
3336 
3337         // Every function with pointer return type might be marked align.
3338         getOrCreateAAFor<AAAlign>(RetPos);
3339 
3340         // Every function with pointer return type might be marked nonnull.
3341         checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(RetPos, RetAttrs);
3342 
3343         // Every function with pointer return type might be marked noalias.
3344         checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(RetPos, RetAttrs);
3345 
3346         // Every function with pointer return type might be marked
3347         // dereferenceable.
3348         getOrCreateAAFor<AADereferenceable>(RetPos);
3349       } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
3350         getOrCreateAAFor<AANoFPClass>(RetPos);
3351       }
3352     }
3353   }
3354 
3355   for (Argument &Arg : F.args()) {
3356     IRPosition ArgPos = IRPosition::argument(Arg);
3357     auto ArgNo = Arg.getArgNo();
3358     AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo);
3359 
3360     if (!IsIPOAmendable) {
3361       if (Arg.getType()->isPointerTy())
3362         // Every argument with pointer type might be marked nofree.
3363         checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3364       continue;
3365     }
3366 
3367     // Every argument might be simplified. We have to go through the
3368     // Attributor interface though as outside AAs can register custom
3369     // simplification callbacks.
3370     bool UsedAssumedInformation = false;
3371     getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
3372                          AA::Intraprocedural);
3373 
3374     // Every argument might be dead.
3375     getOrCreateAAFor<AAIsDead>(ArgPos);
3376 
3377     // Every argument might be marked noundef.
3378     checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(ArgPos, ArgAttrs);
3379 
3380     if (Arg.getType()->isPointerTy()) {
3381       // Every argument with pointer type might be marked nonnull.
3382       checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(ArgPos, ArgAttrs);
3383 
3384       // Every argument with pointer type might be marked noalias.
3385       checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(ArgPos, ArgAttrs);
3386 
3387       // Every argument with pointer type might be marked dereferenceable.
3388       getOrCreateAAFor<AADereferenceable>(ArgPos);
3389 
3390       // Every argument with pointer type might be marked align.
3391       getOrCreateAAFor<AAAlign>(ArgPos);
3392 
3393       // Every argument with pointer type might be marked nocapture.
3394       checkAndQueryIRAttr<Attribute::NoCapture, AANoCapture>(ArgPos, ArgAttrs);
3395 
3396       // Every argument with pointer type might be marked
3397       // "readnone/readonly/writeonly/..."
3398       getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
3399 
3400       // Every argument with pointer type might be marked nofree.
3401       checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3402 
3403       // Every argument with pointer type might be privatizable (or
3404       // promotable)
3405       getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
3406     } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
3407       getOrCreateAAFor<AANoFPClass>(ArgPos);
3408     }
3409   }
3410 
3411   auto CallSitePred = [&](Instruction &I) -> bool {
3412     auto &CB = cast<CallBase>(I);
3413     IRPosition CBInstPos = IRPosition::inst(CB);
3414     IRPosition CBFnPos = IRPosition::callsite_function(CB);
3415 
3416     // Call sites might be dead if they have no side effects and no live
3417     // users. The return value might be dead if there are no live users.
3418     getOrCreateAAFor<AAIsDead>(CBInstPos);
3419 
3420     Function *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
3421     // TODO: Even if the callee is not known now we might be able to simplify
3422     //       the call/callee.
3423     if (!Callee)
3424       return true;
3425 
3426     // Every call site can track active assumptions.
3427     getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);
3428 
3429     // Skip declarations except if annotations on their call sites were
3430     // explicitly requested.
3431     if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
3432         !Callee->hasMetadata(LLVMContext::MD_callback))
3433       return true;
3434 
3435     if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
3436       IRPosition CBRetPos = IRPosition::callsite_returned(CB);
3437       bool UsedAssumedInformation = false;
3438       getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
3439                            AA::Intraprocedural);
3440 
3441       if (AttributeFuncs::isNoFPClassCompatibleType(Callee->getReturnType()))
3442         getOrCreateAAFor<AANoFPClass>(CBInstPos);
3443     }
3444 
3445     const AttributeList &CBAttrs = CBFnPos.getAttrList();
3446     for (int I = 0, E = CB.arg_size(); I < E; ++I) {
3447 
3448       IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
3449       AttributeSet CBArgAttrs = CBAttrs.getParamAttrs(I);
3450 
3451       // Every call site argument might be dead.
3452       getOrCreateAAFor<AAIsDead>(CBArgPos);
3453 
3454       // Call site argument might be simplified. We have to go through the
3455       // Attributor interface though as outside AAs can register custom
3456       // simplification callbacks.
3457       bool UsedAssumedInformation = false;
3458       getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
3459                            AA::Intraprocedural);
3460 
3461       // Every call site argument might be marked "noundef".
3462       checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(CBArgPos, CBArgAttrs);
3463 
3464       Type *ArgTy = CB.getArgOperand(I)->getType();
3465 
3466       if (!ArgTy->isPointerTy()) {
3467         if (AttributeFuncs::isNoFPClassCompatibleType(ArgTy))
3468           getOrCreateAAFor<AANoFPClass>(CBArgPos);
3469 
3470         continue;
3471       }
3472 
3473       // Call site argument attribute "non-null".
3474       checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(CBArgPos, CBArgAttrs);
3475 
3476       // Call site argument attribute "nocapture".
3477       checkAndQueryIRAttr<Attribute::NoCapture, AANoCapture>(CBArgPos,
3478                                                              CBArgAttrs);
3479 
3480       // Call site argument attribute "no-alias".
3481       checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(CBArgPos, CBArgAttrs);
3482 
3483       // Call site argument attribute "dereferenceable".
3484       getOrCreateAAFor<AADereferenceable>(CBArgPos);
3485 
3486       // Call site argument attribute "align".
3487       getOrCreateAAFor<AAAlign>(CBArgPos);
3488 
3489       // Call site argument attribute
3490       // "readnone/readonly/writeonly/..."
3491       if (!CBAttrs.hasParamAttr(I, Attribute::ReadNone))
3492         getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);
3493 
3494       // Call site argument attribute "nofree".
3495       checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(CBArgPos, CBArgAttrs);
3496     }
3497     return true;
3498   };
3499 
3500   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
3501   bool Success;
3502   bool UsedAssumedInformation = false;
3503   Success = checkForAllInstructionsImpl(
3504       nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
3505       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
3506        (unsigned)Instruction::Call},
3507       UsedAssumedInformation);
3508   (void)Success;
3509   assert(Success && "Expected the check call to be successful!");
3510 
3511   auto LoadStorePred = [&](Instruction &I) -> bool {
3512     if (auto *LI = dyn_cast<LoadInst>(&I)) {
3513       getOrCreateAAFor<AAAlign>(IRPosition::value(*LI->getPointerOperand()));
3514       if (SimplifyAllLoads)
3515         getAssumedSimplified(IRPosition::value(I), nullptr,
3516                              UsedAssumedInformation, AA::Intraprocedural);
3517       getOrCreateAAFor<AAAddressSpace>(
3518           IRPosition::value(*LI->getPointerOperand()));
3519     } else {
3520       auto &SI = cast<StoreInst>(I);
3521       getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
3522       getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
3523                            UsedAssumedInformation, AA::Intraprocedural);
3524       getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
3525       getOrCreateAAFor<AAAddressSpace>(
3526           IRPosition::value(*SI.getPointerOperand()));
3527     }
3528     return true;
3529   };
3530   Success = checkForAllInstructionsImpl(
3531       nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
3532       {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
3533       UsedAssumedInformation);
3534   (void)Success;
3535   assert(Success && "Expected the check call to be successful!");
3536 }
3537 
3538 /// Helpers to ease debugging through output streams and print calls.
3539 ///
3540 ///{
3541 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
3542   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
3543 }
3544 
3545 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
3546   switch (AP) {
3547   case IRPosition::IRP_INVALID:
3548     return OS << "inv";
3549   case IRPosition::IRP_FLOAT:
3550     return OS << "flt";
3551   case IRPosition::IRP_RETURNED:
3552     return OS << "fn_ret";
3553   case IRPosition::IRP_CALL_SITE_RETURNED:
3554     return OS << "cs_ret";
3555   case IRPosition::IRP_FUNCTION:
3556     return OS << "fn";
3557   case IRPosition::IRP_CALL_SITE:
3558     return OS << "cs";
3559   case IRPosition::IRP_ARGUMENT:
3560     return OS << "arg";
3561   case IRPosition::IRP_CALL_SITE_ARGUMENT:
3562     return OS << "cs_arg";
3563   }
3564   llvm_unreachable("Unknown attribute position!");
3565 }
3566 
3567 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
3568   const Value &AV = Pos.getAssociatedValue();
3569   OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
3570      << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
3571 
3572   if (Pos.hasCallBaseContext())
3573     OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
3574   return OS << "}";
3575 }
3576 
3577 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
3578   OS << "range-state(" << S.getBitWidth() << ")<";
3579   S.getKnown().print(OS);
3580   OS << " / ";
3581   S.getAssumed().print(OS);
3582   OS << ">";
3583 
3584   return OS << static_cast<const AbstractState &>(S);
3585 }
3586 
3587 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
3588   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
3589 }
3590 
3591 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
3592   AA.print(OS);
3593   return OS;
3594 }
3595 
3596 raw_ostream &llvm::operator<<(raw_ostream &OS,
3597                               const PotentialConstantIntValuesState &S) {
3598   OS << "set-state(< {";
3599   if (!S.isValidState())
3600     OS << "full-set";
3601   else {
3602     for (const auto &It : S.getAssumedSet())
3603       OS << It << ", ";
3604     if (S.undefIsContained())
3605       OS << "undef ";
3606   }
3607   OS << "} >)";
3608 
3609   return OS;
3610 }
3611 
3612 raw_ostream &llvm::operator<<(raw_ostream &OS,
3613                               const PotentialLLVMValuesState &S) {
3614   OS << "set-state(< {";
3615   if (!S.isValidState())
3616     OS << "full-set";
3617   else {
3618     for (const auto &It : S.getAssumedSet()) {
3619       if (auto *F = dyn_cast<Function>(It.first.getValue()))
3620         OS << "@" << F->getName() << "[" << int(It.second) << "], ";
3621       else
3622         OS << *It.first.getValue() << "[" << int(It.second) << "], ";
3623     }
3624     if (S.undefIsContained())
3625       OS << "undef ";
3626   }
3627   OS << "} >)";
3628 
3629   return OS;
3630 }
3631 
3632 void AbstractAttribute::print(Attributor *A, raw_ostream &OS) const {
3633   OS << "[";
3634   OS << getName();
3635   OS << "] for CtxI ";
3636 
3637   if (auto *I = getCtxI()) {
3638     OS << "'";
3639     I->print(OS);
3640     OS << "'";
3641   } else
3642     OS << "<<null inst>>";
3643 
3644   OS << " at position " << getIRPosition() << " with state " << getAsStr(A)
3645      << '\n';
3646 }
3647 
3648 void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
3649   print(OS);
3650 
3651   for (const auto &DepAA : Deps) {
3652     auto *AA = DepAA.getPointer();
3653     OS << "  updates ";
3654     AA->print(OS);
3655   }
3656 
3657   OS << '\n';
3658 }
3659 
3660 raw_ostream &llvm::operator<<(raw_ostream &OS,
3661                               const AAPointerInfo::Access &Acc) {
3662   OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
3663   if (Acc.getLocalInst() != Acc.getRemoteInst())
3664     OS << " via " << *Acc.getLocalInst();
3665   if (Acc.getContent()) {
3666     if (*Acc.getContent())
3667       OS << " [" << **Acc.getContent() << "]";
3668     else
3669       OS << " [ <unknown> ]";
3670   }
3671   return OS;
3672 }
3673 ///}
3674 
3675 /// ----------------------------------------------------------------------------
3676 ///                       Pass (Manager) Boilerplate
3677 /// ----------------------------------------------------------------------------
3678 
3679 static bool runAttributorOnFunctions(InformationCache &InfoCache,
3680                                      SetVector<Function *> &Functions,
3681                                      AnalysisGetter &AG,
3682                                      CallGraphUpdater &CGUpdater,
3683                                      bool DeleteFns, bool IsModulePass) {
3684   if (Functions.empty())
3685     return false;
3686 
3687   LLVM_DEBUG({
3688     dbgs() << "[Attributor] Run on module with " << Functions.size()
3689            << " functions:\n";
3690     for (Function *Fn : Functions)
3691       dbgs() << "  - " << Fn->getName() << "\n";
3692   });
3693 
3694   // Create an Attributor and initially empty information cache that is filled
3695   // while we identify default attribute opportunities.
3696   AttributorConfig AC(CGUpdater);
3697   AC.IsModulePass = IsModulePass;
3698   AC.DeleteFns = DeleteFns;
3699   Attributor A(Functions, InfoCache, AC);
3700 
3701   // Create shallow wrappers for all functions that are not IPO amendable
3702   if (AllowShallowWrappers)
3703     for (Function *F : Functions)
3704       if (!A.isFunctionIPOAmendable(*F))
3705         Attributor::createShallowWrapper(*F);
3706 
3707   // Internalize non-exact functions
3708   // TODO: For now we eagerly internalize functions without calculating the
3709   //       cost; we need a cost interface to determine whether internalizing
3710   //       a function is "beneficial".
3711   if (AllowDeepWrapper) {
3712     unsigned FunSize = Functions.size();
3713     for (unsigned u = 0; u < FunSize; u++) {
3714       Function *F = Functions[u];
3715       if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
3716           !GlobalValue::isInterposableLinkage(F->getLinkage())) {
3717         Function *NewF = Attributor::internalizeFunction(*F);
3718         assert(NewF && "Could not internalize function.");
3719         Functions.insert(NewF);
3720 
3721         // Update call graph
3722         CGUpdater.replaceFunctionWith(*F, *NewF);
3723         for (const Use &U : NewF->uses())
3724           if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
3725             auto *CallerF = CB->getCaller();
3726             CGUpdater.reanalyzeFunction(*CallerF);
3727           }
3728       }
3729     }
3730   }
3731 
3732   for (Function *F : Functions) {
3733     if (F->hasExactDefinition())
3734       NumFnWithExactDefinition++;
3735     else
3736       NumFnWithoutExactDefinition++;
3737 
3738     // We look at internal functions only on-demand, but if any use is not a
3739     // direct call from within the current set of analyzed functions, we have
3740     // to do it eagerly.
3741     if (F->hasLocalLinkage()) {
3742       if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3743             const auto *CB = dyn_cast<CallBase>(U.getUser());
3744             return CB && CB->isCallee(&U) &&
3745                    Functions.count(const_cast<Function *>(CB->getCaller()));
3746           }))
3747         continue;
3748     }
3749 
3750     // Populate the Attributor with abstract attribute opportunities in the
3751     // function and the information cache with IR information.
3752     A.identifyDefaultAbstractAttributes(*F);
3753   }
3754 
3755   ChangeStatus Changed = A.run();
3756 
3757   LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3758                     << " functions, result: " << Changed << ".\n");
3759   return Changed == ChangeStatus::CHANGED;
3760 }
3761 
3762 void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }
3763 
3764 void AADepGraph::dumpGraph() {
3765   static std::atomic<int> CallTimes;
3766   std::string Prefix;
3767 
3768   if (!DepGraphDotFileNamePrefix.empty())
3769     Prefix = DepGraphDotFileNamePrefix;
3770   else
3771     Prefix = "dep_graph";
3772   std::string Filename =
3773       Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";
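  // With the default prefix the n-th dump goes to "dep_graph_<n>.dot".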
3774 
3775   outs() << "Dependency graph dump to " << Filename << ".\n";
3776 
3777   std::error_code EC;
3778 
3779   raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
3780   if (!EC)
3781     llvm::WriteGraph(File, this);
3782 
3783   CallTimes++;
3784 }
3785 
3786 void AADepGraph::print() {
3787   for (auto DepAA : SyntheticRoot.Deps)
3788     cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
3789 }
3790 
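// New pass manager entry points. A minimal way to exercise the module pass,
// assuming a stock `opt` build:
//
//   opt -passes=attributor -S input.ll
//
// The CGSCC flavor below runs per SCC (registered as "attributor-cgscc") and
// does not delete functions.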
3791 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
3792   FunctionAnalysisManager &FAM =
3793       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
3794   AnalysisGetter AG(FAM);
3795 
3796   SetVector<Function *> Functions;
3797   for (Function &F : M)
3798     Functions.insert(&F);
3799 
3800   CallGraphUpdater CGUpdater;
3801   BumpPtrAllocator Allocator;
3802   InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
3803   if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3804                                /* DeleteFns */ true, /* IsModulePass */ true)) {
3805     // FIXME: Think about passes we will preserve and add them here.
3806     return PreservedAnalyses::none();
3807   }
3808   return PreservedAnalyses::all();
3809 }
3810 
3811 PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
3812                                            CGSCCAnalysisManager &AM,
3813                                            LazyCallGraph &CG,
3814                                            CGSCCUpdateResult &UR) {
3815   FunctionAnalysisManager &FAM =
3816       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
3817   AnalysisGetter AG(FAM);
3818 
3819   SetVector<Function *> Functions;
3820   for (LazyCallGraph::Node &N : C)
3821     Functions.insert(&N.getFunction());
3822 
3823   if (Functions.empty())
3824     return PreservedAnalyses::all();
3825 
3826   Module &M = *Functions.back()->getParent();
3827   CallGraphUpdater CGUpdater;
3828   CGUpdater.initialize(CG, C, AM, UR);
3829   BumpPtrAllocator Allocator;
3830   InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
3831   if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3832                                /* DeleteFns */ false,
3833                                /* IsModulePass */ false)) {
3834     // FIXME: Think about passes we will preserve and add them here.
3835     PreservedAnalyses PA;
3836     PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
3837     return PA;
3838   }
3839   return PreservedAnalyses::all();
3840 }
3841 
3842 namespace llvm {
3843 
3844 template <> struct GraphTraits<AADepGraphNode *> {
3845   using NodeRef = AADepGraphNode *;
3846   using DepTy = PointerIntPair<AADepGraphNode *, 1>;
3847   using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;
3848 
3849   static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
3850   static NodeRef DepGetVal(const DepTy &DT) { return DT.getPointer(); }
3851 
3852   using ChildIteratorType =
3853       mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
3854   using ChildEdgeIteratorType = AADepGraphNode::DepSetTy::iterator;
3855 
3856   static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }
3857 
3858   static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
3859 };
3860 
3861 template <>
3862 struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
3863   static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }
3864 
3865   using nodes_iterator =
3866       mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
3867 
3868   static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }
3869 
3870   static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
3871 };
3872 
3873 template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
3874   DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
3875 
3876   static std::string getNodeLabel(const AADepGraphNode *Node,
3877                                   const AADepGraph *DG) {
3878     std::string AAString;
3879     raw_string_ostream O(AAString);
3880     Node->print(O);
3881     return AAString;
3882   }
3883 };
3884 
3885 } // end namespace llvm
3886