xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/IPO/Attributor.cpp (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements an interprocedural pass that deduces and/or propagates
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/Transforms/IPO/Attributor.h"
17 
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/PointerIntPair.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/CallGraph.h"
25 #include "llvm/Analysis/CallGraphSCCPass.h"
26 #include "llvm/Analysis/InlineCost.h"
27 #include "llvm/Analysis/MemoryBuiltins.h"
28 #include "llvm/Analysis/MustExecute.h"
29 #include "llvm/IR/AttributeMask.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/Constant.h"
32 #include "llvm/IR/ConstantFold.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/GlobalValue.h"
36 #include "llvm/IR/GlobalVariable.h"
37 #include "llvm/IR/Instruction.h"
38 #include "llvm/IR/Instructions.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/ValueHandle.h"
42 #include "llvm/Support/Casting.h"
43 #include "llvm/Support/CommandLine.h"
44 #include "llvm/Support/Debug.h"
45 #include "llvm/Support/DebugCounter.h"
46 #include "llvm/Support/FileSystem.h"
47 #include "llvm/Support/GraphWriter.h"
48 #include "llvm/Support/ModRef.h"
49 #include "llvm/Support/raw_ostream.h"
50 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
51 #include "llvm/Transforms/Utils/Cloning.h"
52 #include "llvm/Transforms/Utils/Local.h"
53 #include <cstdint>
54 #include <memory>
55 
56 #ifdef EXPENSIVE_CHECKS
57 #include "llvm/IR/Verifier.h"
58 #endif
59 
60 #include <cassert>
61 #include <optional>
62 #include <string>
63 
64 using namespace llvm;
65 
66 #define DEBUG_TYPE "attributor"
67 #define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"
68 
69 DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
70               "Determine what attributes are manifested in the IR");
71 
72 STATISTIC(NumFnDeleted, "Number of functions deleted");
73 STATISTIC(NumFnWithExactDefinition,
74           "Number of functions with exact definitions");
75 STATISTIC(NumFnWithoutExactDefinition,
76           "Number of functions without exact definitions");
77 STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
78 STATISTIC(NumAttributesTimedOut,
79           "Number of abstract attributes timed out before fixpoint");
80 STATISTIC(NumAttributesValidFixpoint,
81           "Number of abstract attributes in a valid fixpoint state");
82 STATISTIC(NumAttributesManifested,
83           "Number of abstract attributes manifested in IR");
84 
85 // TODO: Determine a good default value.
86 //
87 // In the LLVM test suite and SPEC2006, 32 does not seem to induce compile-time
88 // overhead (when run with the first 5 abstract attributes). The results also
89 // indicate that we never reach 32 iterations but always find a fixpoint sooner.
90 //
91 // This will become more involved once we perform two interleaved fixpoint
92 // iterations: bottom-up and top-down.
93 static cl::opt<unsigned>
94     SetFixpointIterations("attributor-max-iterations", cl::Hidden,
95                           cl::desc("Maximal number of fixpoint iterations."),
96                           cl::init(32));
97 
98 static cl::opt<unsigned>
99     MaxSpecializationPerCB("attributor-max-specializations-per-call-base",
100                            cl::Hidden,
101                            cl::desc("Maximal number of callees specialized for "
102                                     "a call base"),
103                            cl::init(UINT32_MAX));
104 
105 static cl::opt<unsigned, true> MaxInitializationChainLengthX(
106     "attributor-max-initialization-chain-length", cl::Hidden,
107     cl::desc(
108         "Maximal number of chained initializations (to avoid stack overflows)"),
109     cl::location(MaxInitializationChainLength), cl::init(1024));
110 unsigned llvm::MaxInitializationChainLength;
111 
112 static cl::opt<bool> AnnotateDeclarationCallSites(
113     "attributor-annotate-decl-cs", cl::Hidden,
114     cl::desc("Annotate call sites of function declarations."), cl::init(false));
115 
116 static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
117                                        cl::init(true), cl::Hidden);
118 
119 static cl::opt<bool>
120     AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
121                          cl::desc("Allow the Attributor to create shallow "
122                                   "wrappers for non-exact definitions."),
123                          cl::init(false));
124 
125 static cl::opt<bool>
126     AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
127                      cl::desc("Allow the Attributor to use IP information "
128                               "derived from non-exact functions via cloning"),
129                      cl::init(false));
130 
131 // These options can only be used in debug builds.
132 #ifndef NDEBUG
133 static cl::list<std::string>
134     SeedAllowList("attributor-seed-allow-list", cl::Hidden,
135                   cl::desc("Comma separated list of attribute names that are "
136                            "allowed to be seeded."),
137                   cl::CommaSeparated);
138 
139 static cl::list<std::string> FunctionSeedAllowList(
140     "attributor-function-seed-allow-list", cl::Hidden,
141     cl::desc("Comma separated list of function names that are "
142              "allowed to be seeded."),
143     cl::CommaSeparated);
144 #endif
145 
146 static cl::opt<bool>
147     DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
148                  cl::desc("Dump the dependency graph to dot files."),
149                  cl::init(false));
150 
151 static cl::opt<std::string> DepGraphDotFileNamePrefix(
152     "attributor-depgraph-dot-filename-prefix", cl::Hidden,
153     cl::desc("The prefix used for the CallGraph dot file names."));
154 
155 static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
156                                   cl::desc("View the dependency graph."),
157                                   cl::init(false));
158 
159 static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
160                                        cl::desc("Print attribute dependencies"),
161                                        cl::init(false));
162 
163 static cl::opt<bool> EnableCallSiteSpecific(
164     "attributor-enable-call-site-specific-deduction", cl::Hidden,
165     cl::desc("Allow the Attributor to do call site specific analysis"),
166     cl::init(false));
167 
168 static cl::opt<bool>
169     PrintCallGraph("attributor-print-call-graph", cl::Hidden,
170                    cl::desc("Print Attributor's internal call graph"),
171                    cl::init(false));
172 
173 static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
174                                       cl::Hidden,
175                                       cl::desc("Try to simplify all loads."),
176                                       cl::init(true));
177 
178 static cl::opt<bool> CloseWorldAssumption(
179     "attributor-assume-closed-world", cl::Hidden,
180     cl::desc("Should a closed world be assumed, or not. Default if not set."));
181 
182 /// Logic operators for the change status enum class.
183 ///
184 ///{
185 ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
186   return L == ChangeStatus::CHANGED ? L : R;
187 }
188 ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
189   L = L | R;
190   return L;
191 }
192 ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
193   return L == ChangeStatus::UNCHANGED ? L : R;
194 }
195 ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
196   L = L & R;
197   return L;
198 }
199 ///}
200 
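// Usage sketch (illustrative only, not part of this file): how the operators
// above combine statuses. `stepA`/`stepB` are hypothetical helpers returning
// a ChangeStatus.
//
//   ChangeStatus Changed = ChangeStatus::UNCHANGED;
//   Changed |= stepA();                     // CHANGED wins for operator|.
//   Changed |= stepB();                     // Once CHANGED, it stays CHANGED.
//   ChangeStatus Both = stepA() & stepB();  // UNCHANGED wins for operator&.
//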
201 bool AA::isGPU(const Module &M) {
202   Triple T(M.getTargetTriple());
203   return T.isAMDGPU() || T.isNVPTX();
204 }
205 
206 bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
207                       const AbstractAttribute &QueryingAA) {
208   // We are looking for volatile instructions or non-relaxed atomics.
209   if (const auto *CB = dyn_cast<CallBase>(&I)) {
210     if (CB->hasFnAttr(Attribute::NoSync))
211       return true;
212 
213     // Non-convergent and readnone imply nosync.
214     if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
215       return true;
216 
217     if (AANoSync::isNoSyncIntrinsic(&I))
218       return true;
219 
220     bool IsKnownNoSync;
221     return AA::hasAssumedIRAttr<Attribute::NoSync>(
222         A, &QueryingAA, IRPosition::callsite_function(*CB),
223         DepClassTy::OPTIONAL, IsKnownNoSync);
224   }
225 
226   if (!I.mayReadOrWriteMemory())
227     return true;
228 
229   return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
230 }
231 
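// Usage sketch (illustrative): checking whether an instruction can be ignored
// for synchronization purposes. `A`, `I`, and `QueryingAA` are hypothetical.
//
//   if (AA::isNoSyncInst(A, I, QueryingAA))
//     ; // I is neither volatile, a non-relaxed atomic, nor a syncing call.
//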
232 bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
233                              const Value &V, bool ForAnalysisOnly) {
234   // TODO: See the AAInstanceInfo class comment.
235   if (!ForAnalysisOnly)
236     return false;
237   auto *InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
238       QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
239   return InstanceInfoAA && InstanceInfoAA->isAssumedUniqueForAnalysis();
240 }
241 
242 Constant *
243 AA::getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA,
244                           Value &Obj, Type &Ty, const TargetLibraryInfo *TLI,
245                           const DataLayout &DL, AA::RangeTy *RangePtr) {
246   if (isa<AllocaInst>(Obj))
247     return UndefValue::get(&Ty);
248   if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
249     return Init;
250   auto *GV = dyn_cast<GlobalVariable>(&Obj);
251   if (!GV)
252     return nullptr;
253 
254   bool UsedAssumedInformation = false;
255   Constant *Initializer = nullptr;
256   if (A.hasGlobalVariableSimplificationCallback(*GV)) {
257     auto AssumedGV = A.getAssumedInitializerFromCallBack(
258         *GV, &QueryingAA, UsedAssumedInformation);
259     Initializer = *AssumedGV;
260     if (!Initializer)
261       return nullptr;
262   } else {
263     if (!GV->hasLocalLinkage() &&
264         (GV->isInterposable() || !(GV->isConstant() && GV->hasInitializer())))
265       return nullptr;
266     if (!GV->hasInitializer())
267       return UndefValue::get(&Ty);
268 
269     if (!Initializer)
270       Initializer = GV->getInitializer();
271   }
272 
273   if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
274     APInt Offset = APInt(64, RangePtr->Offset);
275     return ConstantFoldLoadFromConst(Initializer, &Ty, Offset, DL);
276   }
277 
278   return ConstantFoldLoadFromUniformValue(Initializer, &Ty, DL);
279 }
280 
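// Example (illustrative): for an object `@G = internal constant i32 7` and
// Ty == i32, the function above returns `i32 7`; for an alloca it returns
// undef, since the memory content before the first write is undefined.
//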
281 bool AA::isValidInScope(const Value &V, const Function *Scope) {
282   if (isa<Constant>(V))
283     return true;
284   if (auto *I = dyn_cast<Instruction>(&V))
285     return I->getFunction() == Scope;
286   if (auto *A = dyn_cast<Argument>(&V))
287     return A->getParent() == Scope;
288   return false;
289 }
290 
291 bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
292                            InformationCache &InfoCache) {
293   if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
294     return true;
295   const Function *Scope = nullptr;
296   const Instruction *CtxI = VAC.getCtxI();
297   if (CtxI)
298     Scope = CtxI->getFunction();
299   if (auto *A = dyn_cast<Argument>(VAC.getValue()))
300     return A->getParent() == Scope;
301   if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
302     if (I->getFunction() == Scope) {
303       if (const DominatorTree *DT =
304               InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
305                   *Scope))
306         return DT->dominates(I, CtxI);
307       // Local dominance check mostly for the old PM passes.
308       if (CtxI && I->getParent() == CtxI->getParent())
309         return llvm::any_of(
310             make_range(I->getIterator(), I->getParent()->end()),
311             [&](const Instruction &AfterI) { return &AfterI == CtxI; });
312     }
313   }
314   return false;
315 }
316 
317 Value *AA::getWithType(Value &V, Type &Ty) {
318   if (V.getType() == &Ty)
319     return &V;
320   if (isa<PoisonValue>(V))
321     return PoisonValue::get(&Ty);
322   if (isa<UndefValue>(V))
323     return UndefValue::get(&Ty);
324   if (auto *C = dyn_cast<Constant>(&V)) {
325     if (C->isNullValue())
326       return Constant::getNullValue(&Ty);
327     if (C->getType()->isPointerTy() && Ty.isPointerTy())
328       return ConstantExpr::getPointerCast(C, &Ty);
329     if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
330       if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
331         return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
332       if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
333         return ConstantFoldCastInstruction(Instruction::FPTrunc, C, &Ty);
334     }
335   }
336   return nullptr;
337 }
338 
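// Examples (illustrative) of the conversions getWithType performs: a null
// pointer re-typed to another pointer type stays null, `i64 0` becomes
// `i32 0` via truncation, and undef/poison are re-created in the new type.
// Conversions that would widen or change the value class yield nullptr.
//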
339 std::optional<Value *>
340 AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
341                                          const std::optional<Value *> &B,
342                                          Type *Ty) {
343   if (A == B)
344     return A;
345   if (!B)
346     return A;
347   if (*B == nullptr)
348     return nullptr;
349   if (!A)
350     return Ty ? getWithType(**B, *Ty) : nullptr;
351   if (*A == nullptr)
352     return nullptr;
353   if (!Ty)
354     Ty = (*A)->getType();
355   if (isa_and_nonnull<UndefValue>(*A))
356     return getWithType(**B, *Ty);
357   if (isa<UndefValue>(*B))
358     return A;
359   if (*A && *B && *A == getWithType(**B, *Ty))
360     return A;
361   return nullptr;
362 }
363 
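// Lattice convention used above (a sketch, for reference): `std::nullopt`
// means "no value known yet", a non-null Value* means "exactly this value",
// and `nullptr` means "too many values". Combining prefers the more
// pessimistic operand, e.g. (with hypothetical values U = undef, V1 != V2):
//
//   combineOptionalValuesInAAValueLatice(std::nullopt, V1, Ty) == V1 (re-typed)
//   combineOptionalValuesInAAValueLatice(U, V1, Ty)            == V1 (re-typed)
//   combineOptionalValuesInAAValueLatice(V1, V2, Ty)           == nullptr
//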
364 template <bool IsLoad, typename Ty>
365 static bool getPotentialCopiesOfMemoryValue(
366     Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
367     SmallSetVector<Instruction *, 4> *PotentialValueOrigins,
368     const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
369     bool OnlyExact) {
370   LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
371                     << " (only exact: " << OnlyExact << ")\n";);
372 
373   Value &Ptr = *I.getPointerOperand();
374   // Containers to remember the pointer infos and new copies while we are not
375   // sure that we can find all of them. If we abort we want to avoid spurious
376   // dependences and potential copies in the provided container.
377   SmallVector<const AAPointerInfo *> PIs;
378   SmallSetVector<Value *, 8> NewCopies;
379   SmallSetVector<Instruction *, 8> NewCopyOrigins;
380 
381   const auto *TLI =
382       A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
383 
384   auto Pred = [&](Value &Obj) {
385     LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
386     if (isa<UndefValue>(&Obj))
387       return true;
388     if (isa<ConstantPointerNull>(&Obj)) {
389       // A null pointer access can be undefined but any offset from null may
390       // be OK. We do not try to optimize the latter.
391       if (!NullPointerIsDefined(I.getFunction(),
392                                 Ptr.getType()->getPointerAddressSpace()) &&
393           A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
394                                  AA::Interprocedural) == &Obj)
395         return true;
396       LLVM_DEBUG(
397           dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
398       return false;
399     }
400     // TODO: Use assumed noalias return.
401     if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
402         !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
403       LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
404                         << "\n";);
405       return false;
406     }
407     if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
408       if (!GV->hasLocalLinkage() &&
409           !(GV->isConstant() && GV->hasInitializer())) {
410         LLVM_DEBUG(dbgs() << "Underlying object is global with external "
411                              "linkage, not supported yet: "
412                           << Obj << "\n";);
413         return false;
414       }
415 
416     bool NullOnly = true;
417     bool NullRequired = false;
418     auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
419                                         bool IsExact) {
420       if (!V || *V == nullptr)
421         NullOnly = false;
422       else if (isa<UndefValue>(*V))
423         /* No op */;
424       else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
425         NullRequired = !IsExact;
426       else
427         NullOnly = false;
428     };
429 
430     auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
431                                       Value &V) {
432       Value *AdjV = AA::getWithType(V, *I.getType());
433       if (!AdjV) {
434         LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
435                              "cannot be converted to read type: "
436                           << *Acc.getRemoteInst() << " : " << *I.getType()
437                           << "\n";);
438       }
439       return AdjV;
440     };
441 
442     auto SkipCB = [&](const AAPointerInfo::Access &Acc) {
443       if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
444         return true;
445       if (IsLoad) {
446         if (Acc.isWrittenValueYetUndetermined())
447           return true;
448         if (PotentialValueOrigins && !isa<AssumeInst>(Acc.getRemoteInst()))
449           return false;
450         if (!Acc.isWrittenValueUnknown())
451           if (Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue()))
452             if (NewCopies.count(V)) {
453               NewCopyOrigins.insert(Acc.getRemoteInst());
454               return true;
455             }
456         if (auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst()))
457           if (Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand()))
458             if (NewCopies.count(V)) {
459               NewCopyOrigins.insert(Acc.getRemoteInst());
460               return true;
461             }
462       }
463       return false;
464     };
465 
466     auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
467       if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
468         return true;
469       if (IsLoad && Acc.isWrittenValueYetUndetermined())
470         return true;
471       CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
472       if (OnlyExact && !IsExact && !NullOnly &&
473           !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
474         LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
475                           << ", abort!\n");
476         return false;
477       }
478       if (NullRequired && !NullOnly) {
479         LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
480                              "one, however found non-null one: "
481                           << *Acc.getRemoteInst() << ", abort!\n");
482         return false;
483       }
484       if (IsLoad) {
485         assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
486         if (!Acc.isWrittenValueUnknown()) {
487           Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
488           if (!V)
489             return false;
490           NewCopies.insert(V);
491           if (PotentialValueOrigins)
492             NewCopyOrigins.insert(Acc.getRemoteInst());
493           return true;
494         }
495         auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
496         if (!SI) {
497           LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
498                                "instruction not supported yet: "
499                             << *Acc.getRemoteInst() << "\n";);
500           return false;
501         }
502         Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
503         if (!V)
504           return false;
505         NewCopies.insert(V);
506         if (PotentialValueOrigins)
507           NewCopyOrigins.insert(SI);
508       } else {
509         assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
510         auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
511         if (!LI && OnlyExact) {
512           LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
513                                "instruction not supported yet: "
514                             << *Acc.getRemoteInst() << "\n";);
515           return false;
516         }
517         NewCopies.insert(Acc.getRemoteInst());
518       }
519       return true;
520     };
521 
522     // If the value has been written to, we don't need the initial value of
523     // the object.
524     bool HasBeenWrittenTo = false;
525 
526     AA::RangeTy Range;
527     auto *PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
528                                          DepClassTy::NONE);
529     if (!PI || !PI->forallInterferingAccesses(
530                    A, QueryingAA, I,
531                    /* FindInterferingWrites */ IsLoad,
532                    /* FindInterferingReads */ !IsLoad, CheckAccess,
533                    HasBeenWrittenTo, Range, SkipCB)) {
534       LLVM_DEBUG(
535           dbgs()
536           << "Failed to verify all interfering accesses for underlying object: "
537           << Obj << "\n");
538       return false;
539     }
540 
541     if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
542       const DataLayout &DL = A.getDataLayout();
543       Value *InitialValue = AA::getInitialValueForObj(
544           A, QueryingAA, Obj, *I.getType(), TLI, DL, &Range);
545       if (!InitialValue) {
546         LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
547                              "underlying object, abort!\n");
548         return false;
549       }
550       CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
551       if (NullRequired && !NullOnly) {
552         LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
553                              "null or undef, abort!\n");
554         return false;
555       }
556 
557       NewCopies.insert(InitialValue);
558       if (PotentialValueOrigins)
559         NewCopyOrigins.insert(nullptr);
560     }
561 
562     PIs.push_back(PI);
563 
564     return true;
565   };
566 
567   const auto *AAUO = A.getAAFor<AAUnderlyingObjects>(
568       QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
569   if (!AAUO || !AAUO->forallUnderlyingObjects(Pred)) {
570     LLVM_DEBUG(
571         dbgs() << "Underlying objects stored into could not be determined\n";);
572     return false;
573   }
574 
575   // Only if we successfully collected all potential copies do we record
576   // dependences (on non-fixed AAPointerInfo AAs), and only then do we modify
577   // the given PotentialCopies container.
578   for (const auto *PI : PIs) {
579     if (!PI->getState().isAtFixpoint())
580       UsedAssumedInformation = true;
581     A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
582   }
583   PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
584   if (PotentialValueOrigins)
585     PotentialValueOrigins->insert(NewCopyOrigins.begin(), NewCopyOrigins.end());
586 
587   return true;
588 }
589 
590 bool AA::getPotentiallyLoadedValues(
591     Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
592     SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
593     const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
594     bool OnlyExact) {
595   return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
596       A, LI, PotentialValues, &PotentialValueOrigins, QueryingAA,
597       UsedAssumedInformation, OnlyExact);
598 }
599 
600 bool AA::getPotentialCopiesOfStoredValue(
601     Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
602     const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
603     bool OnlyExact) {
604   return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
605       A, SI, PotentialCopies, nullptr, QueryingAA, UsedAssumedInformation,
606       OnlyExact);
607 }
608 
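// Usage sketch (illustrative): collecting the values a load may observe.
// `A`, `LI`, and `QueryingAA` are hypothetical; on success the sets contain
// the potential values and the instructions (or nullptr for the initial
// value) they originate from.
//
//   SmallSetVector<Value *, 4> Values;
//   SmallSetVector<Instruction *, 4> Origins;
//   bool UsedAssumedInformation = false;
//   if (AA::getPotentiallyLoadedValues(A, LI, Values, Origins, QueryingAA,
//                                      UsedAssumedInformation,
//                                      /* OnlyExact */ false))
//     ; // Values is a complete over-approximation of what LI can load.
//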
609 static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
610                                         const AbstractAttribute &QueryingAA,
611                                         bool RequireReadNone, bool &IsKnown) {
612   if (RequireReadNone) {
613     if (AA::hasAssumedIRAttr<Attribute::ReadNone>(
614             A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
615             /* IgnoreSubsumingPositions */ true))
616       return true;
617   } else if (AA::hasAssumedIRAttr<Attribute::ReadOnly>(
618                  A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
619                  /* IgnoreSubsumingPositions */ true))
620     return true;
621 
622   IRPosition::Kind Kind = IRP.getPositionKind();
623   if (Kind == IRPosition::IRP_FUNCTION || Kind == IRPosition::IRP_CALL_SITE) {
624     const auto *MemLocAA =
625         A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
626     if (MemLocAA && MemLocAA->isAssumedReadNone()) {
627       IsKnown = MemLocAA->isKnownReadNone();
628       if (!IsKnown)
629         A.recordDependence(*MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
630       return true;
631     }
632   }
633 
634   const auto *MemBehaviorAA =
635       A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
636   if (MemBehaviorAA &&
637       (MemBehaviorAA->isAssumedReadNone() ||
638        (!RequireReadNone && MemBehaviorAA->isAssumedReadOnly()))) {
639     IsKnown = RequireReadNone ? MemBehaviorAA->isKnownReadNone()
640                               : MemBehaviorAA->isKnownReadOnly();
641     if (!IsKnown)
642       A.recordDependence(*MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
643     return true;
644   }
645 
646   return false;
647 }
648 
649 bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
650                            const AbstractAttribute &QueryingAA, bool &IsKnown) {
651   return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
652                                      /* RequireReadNone */ false, IsKnown);
653 }
654 bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
655                            const AbstractAttribute &QueryingAA, bool &IsKnown) {
656   return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
657                                      /* RequireReadNone */ true, IsKnown);
658 }
659 
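// Usage sketch (illustrative): the wrappers above are the preferred way to
// query memory behavior, since they consult IR attributes, AAMemoryLocation,
// and AAMemoryBehavior in one go. All names below are hypothetical.
//
//   bool IsKnown = false;
//   if (AA::isAssumedReadOnly(A, IRPosition::callsite_function(CB),
//                             QueryingAA, IsKnown))
//     ; // The call is assumed (IsKnown: proven) not to write memory.
//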
660 static bool
661 isPotentiallyReachable(Attributor &A, const Instruction &FromI,
662                        const Instruction *ToI, const Function &ToFn,
663                        const AbstractAttribute &QueryingAA,
664                        const AA::InstExclusionSetTy *ExclusionSet,
665                        std::function<bool(const Function &F)> GoBackwardsCB) {
666   DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
667     dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
668            << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
669            << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
670            << "]\n";
671     if (ExclusionSet)
672       for (auto *ES : *ExclusionSet)
673         dbgs() << *ES << "\n";
674   });
675 
676   // We know kernels (generally) cannot be called from within the module. Thus,
677   // for reachability we would need to step back from a kernel which would allow
678   // us to reach anything anyway. Even if a kernel is invoked from another
679   // kernel, values like allocas and shared memory are not accessible. We
680   // implicitly check for this situation to avoid costly lookups.
681   if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
682       !GoBackwardsCB(*FromI.getFunction()) && ToFn.hasFnAttribute("kernel") &&
683       FromI.getFunction()->hasFnAttribute("kernel")) {
684     LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
685                          "module; success\n";);
686     return false;
687   }
688 
689   // If we can go arbitrarily backwards we will eventually reach an entry point
690   // that can reach ToI. Only if a set of blocks through which we cannot go is
691   // provided, or once we track internal functions not accessible from the
692   // outside, it makes sense to perform backwards analysis in the absence of a
693   // GoBackwardsCB.
694   if (!GoBackwardsCB && !ExclusionSet) {
695     LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
696                       << " is not checked backwards and does not have an "
697                          "exclusion set, abort\n");
698     return true;
699   }
700 
701   SmallPtrSet<const Instruction *, 8> Visited;
702   SmallVector<const Instruction *> Worklist;
703   Worklist.push_back(&FromI);
704 
705   while (!Worklist.empty()) {
706     const Instruction *CurFromI = Worklist.pop_back_val();
707     if (!Visited.insert(CurFromI).second)
708       continue;
709 
710     const Function *FromFn = CurFromI->getFunction();
711     if (FromFn == &ToFn) {
712       if (!ToI)
713         return true;
714       LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
715                         << " intraprocedurally\n");
716       const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
717           QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
718       bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
719                                            A, *CurFromI, *ToI, ExclusionSet);
720       LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
721                         << (Result ? "can potentially " : "cannot ") << "reach "
722                         << *ToI << " [Intra]\n");
723       if (Result)
724         return true;
725     }
726 
727     bool Result = true;
728     if (!ToFn.isDeclaration() && ToI) {
729       const auto *ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
730           QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
731       const Instruction &EntryI = ToFn.getEntryBlock().front();
732       Result = !ToReachabilityAA || ToReachabilityAA->isAssumedReachable(
733                                         A, EntryI, *ToI, ExclusionSet);
734       LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
735                         << " " << (Result ? "can potentially " : "cannot ")
736                         << "reach @" << *ToI << " [ToFn]\n");
737     }
738 
739     if (Result) {
740       // The entry of ToFn can reach the instruction ToI. Check whether the
741       // current instruction can reach ToFn as well.
742       const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
743           QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
744       Result = !FnReachabilityAA || FnReachabilityAA->instructionCanReach(
745                                         A, *CurFromI, ToFn, ExclusionSet);
746       LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
747                         << " " << (Result ? "can potentially " : "cannot ")
748                         << "reach @" << ToFn.getName() << " [FromFn]\n");
749       if (Result)
750         return true;
751     }
752 
753     // TODO: Check assumed nounwind.
754     const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
755         QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
756     auto ReturnInstCB = [&](Instruction &Ret) {
757       bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
758                                            A, *CurFromI, Ret, ExclusionSet);
759       LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
760                         << (Result ? "can potentially " : "cannot ") << "reach "
761                         << Ret << " [Intra]\n");
762       return !Result;
763     };
764 
765     // Check if we can reach returns.
766     bool UsedAssumedInformation = false;
767     if (A.checkForAllInstructions(ReturnInstCB, FromFn, &QueryingAA,
768                                   {Instruction::Ret}, UsedAssumedInformation)) {
769       LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
770       continue;
771     }
772 
773     if (!GoBackwardsCB) {
774       LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
775                         << " is not checked backwards, abort\n");
776       return true;
777     }
778 
779     // If we do not go backwards from the FromFn we are done here and so far we
780     // could not find a way to reach ToFn/ToI.
781     if (!GoBackwardsCB(*FromFn))
782       continue;
783 
784     LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
785                       << FromFn->getName() << "\n");
786 
787     auto CheckCallSite = [&](AbstractCallSite ACS) {
788       CallBase *CB = ACS.getInstruction();
789       if (!CB)
790         return false;
791 
792       if (isa<InvokeInst>(CB))
793         return false;
794 
795       Instruction *Inst = CB->getNextNonDebugInstruction();
796       Worklist.push_back(Inst);
797       return true;
798     };
799 
800     Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
801                                      /* RequireAllCallSites */ true,
802                                      &QueryingAA, UsedAssumedInformation);
803     if (Result) {
804       LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
805                         << " in @" << FromFn->getName()
806                         << " failed, give up\n");
807       return true;
808     }
809 
810     LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
811                       << " in @" << FromFn->getName()
812                       << " worklist size is: " << Worklist.size() << "\n");
813   }
814   return false;
815 }
816 
817 bool AA::isPotentiallyReachable(
818     Attributor &A, const Instruction &FromI, const Instruction &ToI,
819     const AbstractAttribute &QueryingAA,
820     const AA::InstExclusionSetTy *ExclusionSet,
821     std::function<bool(const Function &F)> GoBackwardsCB) {
822   const Function *ToFn = ToI.getFunction();
823   return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
824                                   ExclusionSet, GoBackwardsCB);
825 }
826 
827 bool AA::isPotentiallyReachable(
828     Attributor &A, const Instruction &FromI, const Function &ToFn,
829     const AbstractAttribute &QueryingAA,
830     const AA::InstExclusionSetTy *ExclusionSet,
831     std::function<bool(const Function &F)> GoBackwardsCB) {
832   return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
833                                   ExclusionSet, GoBackwardsCB);
834 }
835 
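// Note on semantics (for reference): these helpers answer "may FromI reach
// ToI/ToFn", so `true` is the conservative result. A sketch, with
// hypothetical names:
//
//   if (!AA::isPotentiallyReachable(A, FromI, ToI, QueryingAA,
//                                   /* ExclusionSet */ nullptr))
//     ; // Proven: no execution path from FromI ever reaches ToI.
//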
836 bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
837                                     const AbstractAttribute &QueryingAA) {
838   if (isa<UndefValue>(Obj))
839     return true;
840   if (isa<AllocaInst>(Obj)) {
841     InformationCache &InfoCache = A.getInfoCache();
842     if (!InfoCache.stackIsAccessibleByOtherThreads()) {
843       LLVM_DEBUG(
844           dbgs() << "[AA] Object '" << Obj
845                  << "' is thread local; stack objects are thread local.\n");
846       return true;
847     }
848     bool IsKnownNoCapture;
849     bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
850         A, &QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL,
851         IsKnownNoCapture);
852     LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
853                       << (IsAssumedNoCapture ? "" : "not") << " thread local; "
854                       << (IsAssumedNoCapture ? "non-" : "")
855                       << "captured stack object.\n");
856     return IsAssumedNoCapture;
857   }
858   if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
859     if (GV->isConstant()) {
860       LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
861                         << "' is thread local; constant global\n");
862       return true;
863     }
864     if (GV->isThreadLocal()) {
865       LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
866                         << "' is thread local; thread local global\n");
867       return true;
868     }
869   }
870 
871   if (A.getInfoCache().targetIsGPU()) {
872     if (Obj.getType()->getPointerAddressSpace() ==
873         (int)AA::GPUAddressSpace::Local) {
874       LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
875                         << "' is thread local; GPU local memory\n");
876       return true;
877     }
878     if (Obj.getType()->getPointerAddressSpace() ==
879         (int)AA::GPUAddressSpace::Constant) {
880       LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
881                         << "' is thread local; GPU constant memory\n");
882       return true;
883     }
884   }
885 
886   LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
887   return false;
888 }
889 
890 bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
891                                         const AbstractAttribute &QueryingAA) {
892   if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
893     return false;
894 
895   SmallSetVector<const Value *, 8> Ptrs;
896 
897   auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
898     if (!Loc || !Loc->Ptr) {
899       LLVM_DEBUG(
900           dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
901       return false;
902     }
903     Ptrs.insert(Loc->Ptr);
904     return true;
905   };
906 
907   if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
908     if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
909       return true;
910     if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
911       if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
912         return true;
913   } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
914     return true;
915 
916   return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
917 }
918 
919 bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
920                                         ArrayRef<const Value *> Ptrs,
921                                         const AbstractAttribute &QueryingAA,
922                                         const Instruction *CtxI) {
923   for (const Value *Ptr : Ptrs) {
924     if (!Ptr) {
925       LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
926       return true;
927     }
928 
929     auto Pred = [&](Value &Obj) {
930       if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
931         return true;
932       LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
933                         << "'; -> requires barrier\n");
934       return false;
935     };
936 
937     const auto *UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
938         QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
939     if (!UnderlyingObjsAA || !UnderlyingObjsAA->forallUnderlyingObjects(Pred))
940       return true;
941   }
942   return false;
943 }
944 
945 /// Return true if \p New is equal or worse than \p Old.
946 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
947   if (!Old.isIntAttribute())
948     return true;
949 
950   return Old.getValueAsInt() >= New.getValueAsInt();
951 }
952 
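// Example (illustrative): for integer attributes a larger value is stronger,
// so with Old = dereferenceable(8) and New = dereferenceable(4), New is
// "equal or worse" and need not replace Old.
//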
953 /// Return true if the information provided by \p Attr was added to the
954 /// attribute set \p AttrSet. This is only the case if it was not already
955 /// present in \p AttrSet.
956 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
957                              AttributeSet AttrSet, bool ForceReplace,
958                              AttrBuilder &AB) {
959 
960   if (Attr.isEnumAttribute()) {
961     Attribute::AttrKind Kind = Attr.getKindAsEnum();
962     if (AttrSet.hasAttribute(Kind))
963       return false;
964     AB.addAttribute(Kind);
965     return true;
966   }
967   if (Attr.isStringAttribute()) {
968     StringRef Kind = Attr.getKindAsString();
969     if (AttrSet.hasAttribute(Kind)) {
970       if (!ForceReplace)
971         return false;
972     }
973     AB.addAttribute(Kind, Attr.getValueAsString());
974     return true;
975   }
976   if (Attr.isIntAttribute()) {
977     Attribute::AttrKind Kind = Attr.getKindAsEnum();
978     if (!ForceReplace && Kind == Attribute::Memory) {
979       MemoryEffects ME = Attr.getMemoryEffects() & AttrSet.getMemoryEffects();
980       if (ME == AttrSet.getMemoryEffects())
981         return false;
982       AB.addMemoryAttr(ME);
983       return true;
984     }
985     if (AttrSet.hasAttribute(Kind)) {
986       if (!ForceReplace && isEqualOrWorse(Attr, AttrSet.getAttribute(Kind)))
987         return false;
988     }
989     AB.addAttribute(Attr);
990     return true;
991   }
992 
993   llvm_unreachable("Expected enum or string attribute!");
994 }
995 
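// Example (illustrative): for the `memory` attribute the intersection is
// taken instead of a plain replace. If the set already carries
// memory(readwrite) and `Attr` is memory(read), the result is memory(read);
// if it already carries memory(read) and `Attr` is memory(read), nothing
// changes and false is returned.
//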
996 Argument *IRPosition::getAssociatedArgument() const {
997   if (getPositionKind() == IRP_ARGUMENT)
998     return cast<Argument>(&getAnchorValue());
999 
1000   // Not an Argument and no argument number means this is not a call site
1001   // argument, thus we cannot find a callback argument to return.
1002   int ArgNo = getCallSiteArgNo();
1003   if (ArgNo < 0)
1004     return nullptr;
1005 
1006   // Use abstract call sites to make the connection between the call site
1007   // values and the ones in callbacks. If a callback was found that makes use
1008   // of the underlying call site operand, we want the corresponding callback
1009   // callee argument and not the direct callee argument.
1010   std::optional<Argument *> CBCandidateArg;
1011   SmallVector<const Use *, 4> CallbackUses;
1012   const auto &CB = cast<CallBase>(getAnchorValue());
1013   AbstractCallSite::getCallbackUses(CB, CallbackUses);
1014   for (const Use *U : CallbackUses) {
1015     AbstractCallSite ACS(U);
1016     assert(ACS && ACS.isCallbackCall());
1017     if (!ACS.getCalledFunction())
1018       continue;
1019 
1020     for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
1021 
1022       // Test if the underlying call site operand is argument number u of the
1023       // callback callee.
1024       if (ACS.getCallArgOperandNo(u) != ArgNo)
1025         continue;
1026 
1027       assert(ACS.getCalledFunction()->arg_size() > u &&
1028              "ACS mapped into var-args arguments!");
1029       if (CBCandidateArg) {
1030         CBCandidateArg = nullptr;
1031         break;
1032       }
1033       CBCandidateArg = ACS.getCalledFunction()->getArg(u);
1034     }
1035   }
1036 
1037   // If we found a unique callback candidate argument, return it.
1038   if (CBCandidateArg && *CBCandidateArg)
1039     return *CBCandidateArg;
1040 
1041   // If no callbacks were found, or none used the underlying call site operand
1042   // exclusively, use the direct callee argument if available.
1043   auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
1044   if (Callee && Callee->arg_size() > unsigned(ArgNo))
1045     return Callee->getArg(ArgNo);
1046 
1047   return nullptr;
1048 }
1049 
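// Example (illustrative): for a callback call such as
//   pthread_create(&t, nullptr, @callee, %arg)
// with a matching !callback encoding, the call site argument %arg maps to
// @callee's parameter rather than to pthread_create's fourth parameter; the
// function above returns that callback callee argument.
//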
1050 ChangeStatus AbstractAttribute::update(Attributor &A) {
1051   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
1052   if (getState().isAtFixpoint())
1053     return HasChanged;
1054 
1055   LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
1056 
1057   HasChanged = updateImpl(A);
1058 
1059   LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
1060                     << "\n");
1061 
1062   return HasChanged;
1063 }
1064 
1065 Attributor::Attributor(SetVector<Function *> &Functions,
1066                        InformationCache &InfoCache,
1067                        AttributorConfig Configuration)
1068     : Allocator(InfoCache.Allocator), Functions(Functions),
1069       InfoCache(InfoCache), Configuration(Configuration) {
1070   if (!isClosedWorldModule())
1071     return;
1072   for (Function *Fn : Functions)
1073     if (Fn->hasAddressTaken(/*PutOffender=*/nullptr,
1074                             /*IgnoreCallbackUses=*/false,
1075                             /*IgnoreAssumeLikeCalls=*/true,
1076                             /*IgnoreLLVMUsed=*/true,
1077                             /*IgnoreARCAttachedCall=*/false,
1078                             /*IgnoreCastedDirectCall=*/true))
1079       InfoCache.IndirectlyCallableFunctions.push_back(Fn);
1080 }
1081 
1082 bool Attributor::getAttrsFromAssumes(const IRPosition &IRP,
1083                                      Attribute::AttrKind AK,
1084                                      SmallVectorImpl<Attribute> &Attrs) {
1085   assert(IRP.getPositionKind() != IRPosition::IRP_INVALID &&
1086          "Did expect a valid position!");
1087   MustBeExecutedContextExplorer *Explorer =
1088       getInfoCache().getMustBeExecutedContextExplorer();
1089   if (!Explorer)
1090     return false;
1091 
1092   Value &AssociatedValue = IRP.getAssociatedValue();
1093 
1094   const Assume2KnowledgeMap &A2K =
1095       getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});
1096 
1097   // Check if we found any potential assume use; if not, we don't need to
1098   // create explorer iterators.
1099   if (A2K.empty())
1100     return false;
1101 
1102   LLVMContext &Ctx = AssociatedValue.getContext();
1103   unsigned AttrsSize = Attrs.size();
1104   auto EIt = Explorer->begin(IRP.getCtxI()),
1105        EEnd = Explorer->end(IRP.getCtxI());
1106   for (const auto &It : A2K)
1107     if (Explorer->findInContextOf(It.first, EIt, EEnd))
1108       Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
1109   return AttrsSize != Attrs.size();
1110 }
1111 
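// Usage sketch (illustrative): querying alignment knowledge derived from
// llvm.assume calls, e.g. one with an "align"(%p, 16) operand bundle.
// `A` is an Attributor and `IRP` an IRPosition; both hypothetical here.
//
//   SmallVector<Attribute> AssumedAttrs;
//   if (A.getAttrsFromAssumes(IRP, Attribute::Alignment, AssumedAttrs))
//     ; // AssumedAttrs now holds align(16) if the assume must be executed.
//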
1112 template <typename DescTy>
1113 ChangeStatus
1114 Attributor::updateAttrMap(const IRPosition &IRP, ArrayRef<DescTy> AttrDescs,
1115                           function_ref<bool(const DescTy &, AttributeSet,
1116                                             AttributeMask &, AttrBuilder &)>
1117                               CB) {
1118   if (AttrDescs.empty())
1119     return ChangeStatus::UNCHANGED;
1120   switch (IRP.getPositionKind()) {
1121   case IRPosition::IRP_FLOAT:
1122   case IRPosition::IRP_INVALID:
1123     return ChangeStatus::UNCHANGED;
1124   default:
1125     break;
1126   };
1127 
1128   AttributeList AL;
1129   Value *AttrListAnchor = IRP.getAttrListAnchor();
1130   auto It = AttrsMap.find(AttrListAnchor);
1131   if (It == AttrsMap.end())
1132     AL = IRP.getAttrList();
1133   else
1134     AL = It->getSecond();
1135 
1136   LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1137   auto AttrIdx = IRP.getAttrIdx();
1138   AttributeSet AS = AL.getAttributes(AttrIdx);
1139   AttributeMask AM;
1140   AttrBuilder AB(Ctx);
1141 
1142   ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
1143   for (const DescTy &AttrDesc : AttrDescs)
1144     if (CB(AttrDesc, AS, AM, AB))
1145       HasChanged = ChangeStatus::CHANGED;
1146 
1147   if (HasChanged == ChangeStatus::UNCHANGED)
1148     return ChangeStatus::UNCHANGED;
1149 
1150   AL = AL.removeAttributesAtIndex(Ctx, AttrIdx, AM);
1151   AL = AL.addAttributesAtIndex(Ctx, AttrIdx, AB);
1152   AttrsMap[AttrListAnchor] = AL;
1153   return ChangeStatus::CHANGED;
1154 }
1155 
1156 bool Attributor::hasAttr(const IRPosition &IRP,
1157                          ArrayRef<Attribute::AttrKind> AttrKinds,
1158                          bool IgnoreSubsumingPositions,
1159                          Attribute::AttrKind ImpliedAttributeKind) {
1160   bool Implied = false;
1161   bool HasAttr = false;
1162   auto HasAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
1163                        AttributeMask &, AttrBuilder &) {
1164     if (AttrSet.hasAttribute(Kind)) {
1165       Implied |= Kind != ImpliedAttributeKind;
1166       HasAttr = true;
1167     }
1168     return false;
1169   };
1170   for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
1171     updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, HasAttrCB);
1172     if (HasAttr)
1173       break;
1174     // The first position returned by the SubsumingPositionIterator is
1175     // always the position itself. If we ignore subsuming positions we
1176     // are done after the first iteration.
1177     if (IgnoreSubsumingPositions)
1178       break;
1179     Implied = true;
1180   }
1181   if (!HasAttr) {
1182     Implied = true;
1183     SmallVector<Attribute> Attrs;
1184     for (Attribute::AttrKind AK : AttrKinds)
1185       if (getAttrsFromAssumes(IRP, AK, Attrs)) {
1186         HasAttr = true;
1187         break;
1188       }
1189   }
1190 
1191   // Check if we should manifest the implied attribute kind at the IRP.
1192   if (ImpliedAttributeKind != Attribute::None && HasAttr && Implied)
1193     manifestAttrs(IRP, {Attribute::get(IRP.getAnchorValue().getContext(),
1194                                        ImpliedAttributeKind)});
1195   return HasAttr;
1196 }
1197 
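// Usage sketch (illustrative): asking whether a call site is `nofree`,
// letting the callee's function-level attribute subsume the call site one.
// If only a subsuming position carries the attribute, it is materialized at
// `CSPos` because `ImpliedAttributeKind` is set. All names are hypothetical.
//
//   IRPosition CSPos = IRPosition::callsite_function(CB);
//   bool Known = A.hasAttr(CSPos, {Attribute::NoFree},
//                          /* IgnoreSubsumingPositions */ false,
//                          Attribute::NoFree);
//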
1198 void Attributor::getAttrs(const IRPosition &IRP,
1199                           ArrayRef<Attribute::AttrKind> AttrKinds,
1200                           SmallVectorImpl<Attribute> &Attrs,
1201                           bool IgnoreSubsumingPositions) {
1202   auto CollectAttrCB = [&](const Attribute::AttrKind &Kind,
1203                            AttributeSet AttrSet, AttributeMask &,
1204                            AttrBuilder &) {
1205     if (AttrSet.hasAttribute(Kind))
1206       Attrs.push_back(AttrSet.getAttribute(Kind));
1207     return false;
1208   };
1209   for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
1210     updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, CollectAttrCB);
1211     // The first position returned by the SubsumingPositionIterator is
1212     // always the position itself. If we ignore subsuming positions we
1213     // are done after the first iteration.
1214     if (IgnoreSubsumingPositions)
1215       break;
1216   }
1217   for (Attribute::AttrKind AK : AttrKinds)
1218     getAttrsFromAssumes(IRP, AK, Attrs);
1219 }
1220 
1221 ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
1222                                      ArrayRef<Attribute::AttrKind> AttrKinds) {
1223   auto RemoveAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
1224                           AttributeMask &AM, AttrBuilder &) {
1225     if (!AttrSet.hasAttribute(Kind))
1226       return false;
1227     AM.addAttribute(Kind);
1228     return true;
1229   };
1230   return updateAttrMap<Attribute::AttrKind>(IRP, AttrKinds, RemoveAttrCB);
1231 }
1232 
1233 ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
1234                                      ArrayRef<StringRef> Attrs) {
1235   auto RemoveAttrCB = [&](StringRef Attr, AttributeSet AttrSet,
1236                           AttributeMask &AM, AttrBuilder &) -> bool {
1237     if (!AttrSet.hasAttribute(Attr))
1238       return false;
1239     AM.addAttribute(Attr);
1240     return true;
1241   };
1242 
1243   return updateAttrMap<StringRef>(IRP, Attrs, RemoveAttrCB);
1244 }
1245 
1246 ChangeStatus Attributor::manifestAttrs(const IRPosition &IRP,
1247                                        ArrayRef<Attribute> Attrs,
1248                                        bool ForceReplace) {
1249   LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1250   auto AddAttrCB = [&](const Attribute &Attr, AttributeSet AttrSet,
1251                        AttributeMask &, AttrBuilder &AB) {
1252     return addIfNotExistent(Ctx, Attr, AttrSet, ForceReplace, AB);
1253   };
1254   return updateAttrMap<Attribute>(IRP, Attrs, AddAttrCB);
1255 }
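// Illustrative sketch (not part of the original file): manifesting a single
// attribute kind at a position `IRP`, assuming an Attributor `A` is in
// scope, mirrors the implied-attribute manifestation in hasAttr above:
//
//   LLVMContext &Ctx = IRP.getAnchorValue().getContext();
//   A.manifestAttrs(IRP, {Attribute::get(Ctx, Attribute::NoUndef)},
//                   /* ForceReplace */ false);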
1256 
1257 const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
1258 const IRPosition
1259     IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());
1260 
1261 SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
1262   IRPositions.emplace_back(IRP);
1263 
1264   // Helper to determine if operand bundles on a call site are benign or
1265   // potentially problematic. We handle only llvm.assume for now.
1266   auto CanIgnoreOperandBundles = [](const CallBase &CB) {
1267     return (isa<IntrinsicInst>(CB) &&
1268             cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic::assume);
1269   };
1270 
1271   const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
1272   switch (IRP.getPositionKind()) {
1273   case IRPosition::IRP_INVALID:
1274   case IRPosition::IRP_FLOAT:
1275   case IRPosition::IRP_FUNCTION:
1276     return;
1277   case IRPosition::IRP_ARGUMENT:
1278   case IRPosition::IRP_RETURNED:
1279     IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
1280     return;
1281   case IRPosition::IRP_CALL_SITE:
1282     assert(CB && "Expected call site!");
1283     // TODO: We need to look at the operand bundles similar to the redirection
1284     //       in CallBase.
1285     if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
1286       if (auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand()))
1287         IRPositions.emplace_back(IRPosition::function(*Callee));
1288     return;
1289   case IRPosition::IRP_CALL_SITE_RETURNED:
1290     assert(CB && "Expected call site!");
1291     // TODO: We need to look at the operand bundles similar to the redirection
1292     //       in CallBase.
1293     if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1294       if (auto *Callee =
1295               dyn_cast_if_present<Function>(CB->getCalledOperand())) {
1296         IRPositions.emplace_back(IRPosition::returned(*Callee));
1297         IRPositions.emplace_back(IRPosition::function(*Callee));
1298         for (const Argument &Arg : Callee->args())
1299           if (Arg.hasReturnedAttr()) {
1300             IRPositions.emplace_back(
1301                 IRPosition::callsite_argument(*CB, Arg.getArgNo()));
1302             IRPositions.emplace_back(
1303                 IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
1304             IRPositions.emplace_back(IRPosition::argument(Arg));
1305           }
1306       }
1307     }
1308     IRPositions.emplace_back(IRPosition::callsite_function(*CB));
1309     return;
1310   case IRPosition::IRP_CALL_SITE_ARGUMENT: {
1311     assert(CB && "Expected call site!");
1312     // TODO: We need to look at the operand bundles similar to the redirection
1313     //       in CallBase.
1314     if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1315       auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
1316       if (Callee) {
1317         if (Argument *Arg = IRP.getAssociatedArgument())
1318           IRPositions.emplace_back(IRPosition::argument(*Arg));
1319         IRPositions.emplace_back(IRPosition::function(*Callee));
1320       }
1321     }
1322     IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
1323     return;
1324   }
1325   }
1326 }
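// Illustrative example (not part of the original file): for a call site
// argument position of a direct call, the constructor above collects, in
// order, the position itself, the callee argument, the callee function, and
// the associated value position. A client could enumerate them like so,
// with `CSArgPos` assumed to be in scope:
//
//   for (const IRPosition &EquivIRP : SubsumingPositionIterator(CSArgPos))
//     dbgs() << EquivIRP << "\n";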
1327 
1328 void IRPosition::verify() {
1329 #ifdef EXPENSIVE_CHECKS
1330   switch (getPositionKind()) {
1331   case IRP_INVALID:
1332     assert((CBContext == nullptr) &&
1333            "Invalid position must not have CallBaseContext!");
1334     assert(!Enc.getOpaqueValue() &&
1335            "Expected a nullptr for an invalid position!");
1336     return;
1337   case IRP_FLOAT:
1338     assert((!isa<Argument>(&getAssociatedValue())) &&
1339            "Expected specialized kind for argument values!");
1340     return;
1341   case IRP_RETURNED:
1342     assert(isa<Function>(getAsValuePtr()) &&
1343            "Expected function for a 'returned' position!");
1344     assert(getAsValuePtr() == &getAssociatedValue() &&
1345            "Associated value mismatch!");
1346     return;
1347   case IRP_CALL_SITE_RETURNED:
1348     assert((CBContext == nullptr) &&
1349            "'call site returned' position must not have CallBaseContext!");
1350     assert((isa<CallBase>(getAsValuePtr())) &&
1351            "Expected call base for 'call site returned' position!");
1352     assert(getAsValuePtr() == &getAssociatedValue() &&
1353            "Associated value mismatch!");
1354     return;
1355   case IRP_CALL_SITE:
1356     assert((CBContext == nullptr) &&
1357            "'call site function' position must not have CallBaseContext!");
1358     assert((isa<CallBase>(getAsValuePtr())) &&
1359            "Expected call base for 'call site function' position!");
1360     assert(getAsValuePtr() == &getAssociatedValue() &&
1361            "Associated value mismatch!");
1362     return;
1363   case IRP_FUNCTION:
1364     assert(isa<Function>(getAsValuePtr()) &&
1365            "Expected function for a 'function' position!");
1366     assert(getAsValuePtr() == &getAssociatedValue() &&
1367            "Associated value mismatch!");
1368     return;
1369   case IRP_ARGUMENT:
1370     assert(isa<Argument>(getAsValuePtr()) &&
1371            "Expected argument for a 'argument' position!");
1372     assert(getAsValuePtr() == &getAssociatedValue() &&
1373            "Associated value mismatch!");
1374     return;
1375   case IRP_CALL_SITE_ARGUMENT: {
1376     assert((CBContext == nullptr) &&
1377            "'call site argument' position must not have CallBaseContext!");
1378     Use *U = getAsUsePtr();
1379     (void)U; // Silence unused variable warning.
1380     assert(U && "Expected use for a 'call site argument' position!");
1381     assert(isa<CallBase>(U->getUser()) &&
1382            "Expected call base user for a 'call site argument' position!");
1383     assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
1384            "Expected call base argument operand for a 'call site argument' "
1385            "position");
1386     assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
1387                unsigned(getCallSiteArgNo()) &&
1388            "Argument number mismatch!");
1389     assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
1390     return;
1391   }
1392   }
1393 #endif
1394 }
1395 
1396 std::optional<Constant *>
1397 Attributor::getAssumedConstant(const IRPosition &IRP,
1398                                const AbstractAttribute &AA,
1399                                bool &UsedAssumedInformation) {
1400   // First check all callbacks provided by outside AAs. If any of them
1401   // returns std::nullopt, or a non-null value that differs from the
1402   // associated value, we assume it's simplified.
1403   for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
1404     std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
1405     if (!SimplifiedV)
1406       return std::nullopt;
1407     if (isa_and_nonnull<Constant>(*SimplifiedV))
1408       return cast<Constant>(*SimplifiedV);
1409     return nullptr;
1410   }
1411   if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
1412     return C;
1413   SmallVector<AA::ValueAndContext> Values;
1414   if (getAssumedSimplifiedValues(IRP, &AA, Values,
1415                                  AA::ValueScope::Interprocedural,
1416                                  UsedAssumedInformation)) {
1417     if (Values.empty())
1418       return std::nullopt;
1419     if (auto *C = dyn_cast_or_null<Constant>(
1420             AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
1421       return C;
1422   }
1423   return nullptr;
1424 }
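// Illustrative sketch (not part of the original file): interpreting the
// three possible results, assuming an Attributor `A`, a querying AA `AA`,
// and a position `IRP` are in scope:
//
//   bool UsedAssumedInformation = false;
//   std::optional<Constant *> C =
//       A.getAssumedConstant(IRP, AA, UsedAssumedInformation);
//   if (!C.has_value())
//     ; // Nothing is known yet, a dependence was recorded.
//   else if (*C)
//     ; // Simplified to *C, assumed if UsedAssumedInformation is set.
//   else
//     ; // The value is (assumed) not a constant.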
1425 
1426 std::optional<Value *> Attributor::getAssumedSimplified(
1427     const IRPosition &IRP, const AbstractAttribute *AA,
1428     bool &UsedAssumedInformation, AA::ValueScope S) {
1429   // First check all callbacks provided by outside AAs. If any of them
1430   // returns std::nullopt, or a non-null value that differs from the
1431   // associated value, we assume it's simplified.
1432   for (auto &CB : SimplificationCallbacks.lookup(IRP))
1433     return CB(IRP, AA, UsedAssumedInformation);
1434 
1435   SmallVector<AA::ValueAndContext> Values;
1436   if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
1437     return &IRP.getAssociatedValue();
1438   if (Values.empty())
1439     return std::nullopt;
1440   if (AA)
1441     if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
1442       return V;
1443   if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
1444       IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
1445     return nullptr;
1446   return &IRP.getAssociatedValue();
1447 }
1448 
1449 bool Attributor::getAssumedSimplifiedValues(
1450     const IRPosition &InitialIRP, const AbstractAttribute *AA,
1451     SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
1452     bool &UsedAssumedInformation, bool RecurseForSelectAndPHI) {
1453   SmallPtrSet<Value *, 8> Seen;
1454   SmallVector<IRPosition, 8> Worklist;
1455   Worklist.push_back(InitialIRP);
1456   while (!Worklist.empty()) {
1457     const IRPosition &IRP = Worklist.pop_back_val();
1458 
1459     // First check all callbacks provided by outside AAs. If any of them
1460     // returns std::nullopt, or a non-null value that differs from the
1461     // associated value, we assume it's simplified.
1462     int NV = Values.size();
1463     const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
1464     for (const auto &CB : SimplificationCBs) {
1465       std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
1466       if (!CBResult.has_value())
1467         continue;
1468       Value *V = *CBResult;
1469       if (!V)
1470         return false;
1471       if ((S & AA::ValueScope::Interprocedural) ||
1472           AA::isValidInScope(*V, IRP.getAnchorScope()))
1473         Values.push_back(AA::ValueAndContext{*V, nullptr});
1474       else
1475         return false;
1476     }
1477     if (SimplificationCBs.empty()) {
1478       // If no high-level/outside simplification occurred, use
1479       // AAPotentialValues.
1480       const auto *PotentialValuesAA =
1481           getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
1482       if (PotentialValuesAA &&
               PotentialValuesAA->getAssumedSimplifiedValues(*this, Values, S)) {
1483         UsedAssumedInformation |= !PotentialValuesAA->isAtFixpoint();
1484       } else if (IRP.getPositionKind() != IRPosition::IRP_RETURNED) {
1485         Values.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
1486       } else {
1487         // TODO: We could visit all returns and add the operands.
1488         return false;
1489       }
1490     }
1491 
1492     if (!RecurseForSelectAndPHI)
1493       break;
1494 
1495     for (int I = NV, E = Values.size(); I < E; ++I) {
1496       Value *V = Values[I].getValue();
1497       if (!isa<PHINode>(V) && !isa<SelectInst>(V))
1498         continue;
1499       if (!Seen.insert(V).second)
1500         continue;
1501       // Move the last element to this slot.
1502       Values[I] = Values[E - 1];
1503       // Eliminate the last slot, adjust the indices.
1504       Values.pop_back();
1505       --E;
1506       --I;
1507       // Add a new value (select or phi) to the worklist.
1508       Worklist.push_back(IRPosition::value(*V));
1509     }
1510   }
1511   return true;
1512 }
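// Illustrative example (not part of the original file): with
// RecurseForSelectAndPHI set, a chain such as
//
//   %sel = select i1 %c, i32 4, i32 8
//   %phi = phi i32 [ %a, %bb0 ], [ %sel, %bb1 ]
//
// is unpacked through the worklist above so that Values ends up holding the
// underlying candidates (%a, 4, and 8) rather than %phi and %sel themselves.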
1513 
1514 std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
1515     std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1516     bool &UsedAssumedInformation) {
1517   if (!V)
1518     return V;
1519   if (*V == nullptr || isa<Constant>(*V))
1520     return V;
1521   if (auto *Arg = dyn_cast<Argument>(*V))
1522     if (CB.getCalledOperand() == Arg->getParent() &&
1523         CB.arg_size() > Arg->getArgNo())
1524       if (!Arg->hasPointeeInMemoryValueAttr())
1525         return getAssumedSimplified(
1526             IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
1527             UsedAssumedInformation, AA::Intraprocedural);
1528   return nullptr;
1529 }
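// Illustrative example (not part of the original file): if a simplified
// value turned out to be the argument %arg of @callee and we now reason
// about the call site `call void @callee(i32 %x)`, the translation above
// re-queries the assumed simplified value of the matching call site
// operand, here %x, so intraprocedural reasoning can continue in the caller.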
1530 
1531 Attributor::~Attributor() {
1532   // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
1533   // thus we cannot delete them. We can, and want to, destruct them though.
1534   for (auto &It : AAMap) {
1535     AbstractAttribute *AA = It.getSecond();
1536     AA->~AbstractAttribute();
1537   }
1538 }
1539 
1540 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
1541                                const AAIsDead *FnLivenessAA,
1542                                bool &UsedAssumedInformation,
1543                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
1544   if (!Configuration.UseLiveness)
1545     return false;
1546   const IRPosition &IRP = AA.getIRPosition();
1547   if (!Functions.count(IRP.getAnchorScope()))
1548     return false;
1549   return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
1550                        CheckBBLivenessOnly, DepClass);
1551 }
1552 
1553 bool Attributor::isAssumedDead(const Use &U,
1554                                const AbstractAttribute *QueryingAA,
1555                                const AAIsDead *FnLivenessAA,
1556                                bool &UsedAssumedInformation,
1557                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
1558   if (!Configuration.UseLiveness)
1559     return false;
1560   Instruction *UserI = dyn_cast<Instruction>(U.getUser());
1561   if (!UserI)
1562     return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
1563                          UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1564 
1565   if (auto *CB = dyn_cast<CallBase>(UserI)) {
1566     // For call site argument uses we can check if the argument is
1567     // unused/dead.
1568     if (CB->isArgOperand(&U)) {
1569       const IRPosition &CSArgPos =
1570           IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
1571       return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
1572                            UsedAssumedInformation, CheckBBLivenessOnly,
1573                            DepClass);
1574     }
1575   } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
1576     const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
1577     return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
1578                          UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1579   } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
1580     BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1581     return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
1582                          UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1583   } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
1584     if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1585       const IRPosition IRP = IRPosition::inst(*SI);
1586       const AAIsDead *IsDeadAA =
1587           getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1588       if (IsDeadAA && IsDeadAA->isRemovableStore()) {
1589         if (QueryingAA)
1590           recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1591         if (!IsDeadAA->isKnown(AAIsDead::IS_REMOVABLE))
1592           UsedAssumedInformation = true;
1593         return true;
1594       }
1595     }
1596   }
1597 
1598   return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
1599                        UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1600 }
1601 
1602 bool Attributor::isAssumedDead(const Instruction &I,
1603                                const AbstractAttribute *QueryingAA,
1604                                const AAIsDead *FnLivenessAA,
1605                                bool &UsedAssumedInformation,
1606                                bool CheckBBLivenessOnly, DepClassTy DepClass,
1607                                bool CheckForDeadStore) {
1608   if (!Configuration.UseLiveness)
1609     return false;
1610   const IRPosition::CallBaseContext *CBCtx =
1611       QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1612 
1613   if (ManifestAddedBlocks.contains(I.getParent()))
1614     return false;
1615 
1616   const Function &F = *I.getFunction();
1617   if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1618     FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1619                                               QueryingAA, DepClassTy::NONE);
1620 
1621   // Don't use recursive reasoning.
1622   if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1623     return false;
1624 
1625   // If we have a context instruction and a liveness AA we use it.
1626   if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1627                           : FnLivenessAA->isAssumedDead(&I)) {
1628     if (QueryingAA)
1629       recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1630     if (!FnLivenessAA->isKnownDead(&I))
1631       UsedAssumedInformation = true;
1632     return true;
1633   }
1634 
1635   if (CheckBBLivenessOnly)
1636     return false;
1637 
1638   const IRPosition IRP = IRPosition::inst(I, CBCtx);
1639   const AAIsDead *IsDeadAA =
1640       getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1641 
1642   // Don't use recursive reasoning.
1643   if (!IsDeadAA || QueryingAA == IsDeadAA)
1644     return false;
1645 
1646   if (IsDeadAA->isAssumedDead()) {
1647     if (QueryingAA)
1648       recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1649     if (!IsDeadAA->isKnownDead())
1650       UsedAssumedInformation = true;
1651     return true;
1652   }
1653 
1654   if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA->isRemovableStore()) {
1655     if (QueryingAA)
1656       recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1657     if (!IsDeadAA->isKnownDead())
1658       UsedAssumedInformation = true;
1659     return true;
1660   }
1661 
1662   return false;
1663 }
1664 
1665 bool Attributor::isAssumedDead(const IRPosition &IRP,
1666                                const AbstractAttribute *QueryingAA,
1667                                const AAIsDead *FnLivenessAA,
1668                                bool &UsedAssumedInformation,
1669                                bool CheckBBLivenessOnly, DepClassTy DepClass) {
1670   if (!Configuration.UseLiveness)
1671     return false;
1672   // Don't check liveness for constants, e.g. functions, used as (floating)
1673   // values since the context instruction and the like are meaningless here.
1674   if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
1675       isa<Constant>(IRP.getAssociatedValue())) {
1676     return false;
1677   }
1678 
1679   Instruction *CtxI = IRP.getCtxI();
1680   if (CtxI &&
1681       isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1682                     /* CheckBBLivenessOnly */ true,
1683                     CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1684     return true;
1685 
1686   if (CheckBBLivenessOnly)
1687     return false;
1688 
1689   // If we haven't succeeded we query the specific liveness info for the IRP.
1690   const AAIsDead *IsDeadAA;
1691   if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
1692     IsDeadAA = getOrCreateAAFor<AAIsDead>(
1693         IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
1694         QueryingAA, DepClassTy::NONE);
1695   else
1696     IsDeadAA = getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1697 
1698   // Don't use recursive reasoning.
1699   if (!IsDeadAA || QueryingAA == IsDeadAA)
1700     return false;
1701 
1702   if (IsDeadAA->isAssumedDead()) {
1703     if (QueryingAA)
1704       recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1705     if (!IsDeadAA->isKnownDead())
1706       UsedAssumedInformation = true;
1707     return true;
1708   }
1709 
1710   return false;
1711 }
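// Illustrative sketch (not part of the original file): the liveness queries
// above are typically used to skip dead IR during an AA update, e.g. with
// `A`, `I`, and `QueryingAA` assumed to be in scope:
//
//   bool UsedAssumedInformation = false;
//   if (A.isAssumedDead(IRPosition::inst(I), &QueryingAA,
//                       /* FnLivenessAA */ nullptr, UsedAssumedInformation))
//     return true; // Treat I as dead; revisited if the assumption breaks.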
1712 
1713 bool Attributor::isAssumedDead(const BasicBlock &BB,
1714                                const AbstractAttribute *QueryingAA,
1715                                const AAIsDead *FnLivenessAA,
1716                                DepClassTy DepClass) {
1717   if (!Configuration.UseLiveness)
1718     return false;
1719   const Function &F = *BB.getParent();
1720   if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1721     FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
1722                                               QueryingAA, DepClassTy::NONE);
1723 
1724   // Don't use recursive reasoning.
1725   if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1726     return false;
1727 
1728   if (FnLivenessAA->isAssumedDead(&BB)) {
1729     if (QueryingAA)
1730       recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1731     return true;
1732   }
1733 
1734   return false;
1735 }
1736 
1737 bool Attributor::checkForAllCallees(
1738     function_ref<bool(ArrayRef<const Function *>)> Pred,
1739     const AbstractAttribute &QueryingAA, const CallBase &CB) {
1740   if (const Function *Callee = dyn_cast<Function>(CB.getCalledOperand()))
1741     return Pred(Callee);
1742 
1743   const auto *CallEdgesAA = getAAFor<AACallEdges>(
1744       QueryingAA, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
1745   if (!CallEdgesAA || CallEdgesAA->hasUnknownCallee())
1746     return false;
1747 
1748   const auto &Callees = CallEdgesAA->getOptimisticEdges();
1749   return Pred(Callees.getArrayRef());
1750 }
1751 
1752 bool canMarkAsVisited(const User *Usr) {
1753   return isa<PHINode>(Usr) || !isa<Instruction>(Usr);
1754 }
1755 
1756 bool Attributor::checkForAllUses(
1757     function_ref<bool(const Use &, bool &)> Pred,
1758     const AbstractAttribute &QueryingAA, const Value &V,
1759     bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1760     bool IgnoreDroppableUses,
1761     function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1762 
1763   // Check virtual uses first.
1764   for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1765     if (!CB(*this, &QueryingAA))
1766       return false;
1767 
1768   // Check the trivial case first as it catches void values.
1769   if (V.use_empty())
1770     return true;
1771 
1772   const IRPosition &IRP = QueryingAA.getIRPosition();
1773   SmallVector<const Use *, 16> Worklist;
1774   SmallPtrSet<const Use *, 16> Visited;
1775 
1776   auto AddUsers = [&](const Value &V, const Use *OldUse) {
1777     for (const Use &UU : V.uses()) {
1778       if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1779         LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1780                              "rejected by the equivalence callback: "
1781                           << *UU << "!\n");
1782         return false;
1783       }
1784 
1785       Worklist.push_back(&UU);
1786     }
1787     return true;
1788   };
1789 
1790   AddUsers(V, /* OldUse */ nullptr);
1791 
1792   LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1793                     << " initial uses to check\n");
1794 
1795   const Function *ScopeFn = IRP.getAnchorScope();
1796   const auto *LivenessAA =
1797       ScopeFn ? getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
1798                                    DepClassTy::NONE)
1799               : nullptr;
1800 
1801   while (!Worklist.empty()) {
1802     const Use *U = Worklist.pop_back_val();
1803     if (canMarkAsVisited(U->getUser()) && !Visited.insert(U).second)
1804       continue;
1805     DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1806       if (auto *Fn = dyn_cast<Function>(U->getUser()))
1807         dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1808                << "\n";
1809       else
1810         dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1811                << "\n";
1812     });
1813     bool UsedAssumedInformation = false;
1814     if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1815                       CheckBBLivenessOnly, LivenessDepClass)) {
1816       DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1817                       dbgs() << "[Attributor] Dead use, skip!\n");
1818       continue;
1819     }
1820     if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1821       DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1822                       dbgs() << "[Attributor] Droppable user, skip!\n");
1823       continue;
1824     }
1825 
1826     if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1827       if (&SI->getOperandUse(0) == U) {
1828         if (!Visited.insert(U).second)
1829           continue;
1830         SmallSetVector<Value *, 4> PotentialCopies;
1831         if (AA::getPotentialCopiesOfStoredValue(
1832                 *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1833                 /* OnlyExact */ true)) {
1834           DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1835                           dbgs()
1836                               << "[Attributor] Value is stored, continue with "
1837                               << PotentialCopies.size()
1838                               << " potential copies instead!\n");
1839           for (Value *PotentialCopy : PotentialCopies)
1840             if (!AddUsers(*PotentialCopy, U))
1841               return false;
1842           continue;
1843         }
1844       }
1845     }
1846 
1847     bool Follow = false;
1848     if (!Pred(*U, Follow))
1849       return false;
1850     if (!Follow)
1851       continue;
1852 
1853     User &Usr = *U->getUser();
1854     AddUsers(Usr, /* OldUse */ nullptr);
1855 
1856     auto *RI = dyn_cast<ReturnInst>(&Usr);
1857     if (!RI)
1858       continue;
1859 
1860     Function &F = *RI->getFunction();
1861     auto CallSitePred = [&](AbstractCallSite ACS) {
1862       return AddUsers(*ACS.getInstruction(), U);
1863     };
1864     if (!checkForAllCallSites(CallSitePred, F, /* RequireAllCallSites */ true,
1865                               &QueryingAA, UsedAssumedInformation)) {
1866       LLVM_DEBUG(dbgs() << "[Attributor] Could not follow return instruction "
1867                            "to all call sites: "
1868                         << *RI << "\n");
1869       return false;
1870     }
1871   }
1872 
1873   return true;
1874 }
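// Illustrative sketch (not part of the original file): visiting all
// (transitive) uses of a value V, assuming `A`, `QueryingAA`, and a
// hypothetical helper `isBenignUse` are in scope:
//
//   auto UsePred = [&](const Use &U, bool &Follow) {
//     Follow = true; // Also enqueue the uses of U's user.
//     return isBenignUse(U); // Hypothetical, not part of the API.
//   };
//   if (!A.checkForAllUses(UsePred, QueryingAA, V))
//     ; // Some use was not provably benign, be pessimistic.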
1875 
1876 bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1877                                       const AbstractAttribute &QueryingAA,
1878                                       bool RequireAllCallSites,
1879                                       bool &UsedAssumedInformation) {
1880   // We can try to determine information from the call sites. However, this
1881   // is only possible if all call sites are known, which requires the
1882   // function to have internal linkage.
1883   const IRPosition &IRP = QueryingAA.getIRPosition();
1884   const Function *AssociatedFunction = IRP.getAssociatedFunction();
1885   if (!AssociatedFunction) {
1886     LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1887                       << "\n");
1888     return false;
1889   }
1890 
1891   return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1892                               &QueryingAA, UsedAssumedInformation);
1893 }
1894 
1895 bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1896                                       const Function &Fn,
1897                                       bool RequireAllCallSites,
1898                                       const AbstractAttribute *QueryingAA,
1899                                       bool &UsedAssumedInformation,
1900                                       bool CheckPotentiallyDead) {
1901   if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1902     LLVM_DEBUG(
1903         dbgs()
1904         << "[Attributor] Function " << Fn.getName()
1905         << " has no internal linkage, hence not all call sites are known\n");
1906     return false;
1907   }
1908   // Check virtual uses first.
1909   for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1910     if (!CB(*this, QueryingAA))
1911       return false;
1912 
1913   SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
1914   for (unsigned u = 0; u < Uses.size(); ++u) {
1915     const Use &U = *Uses[u];
1916     DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1917       if (auto *Fn = dyn_cast<Function>(U))
1918         dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1919                << *U.getUser() << "\n";
1920       else
1921         dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1922                << "\n";
1923     });
1924     if (!CheckPotentiallyDead &&
1925         isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1926                       /* CheckBBLivenessOnly */ true)) {
1927       DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1928                       dbgs() << "[Attributor] Dead use, skip!\n");
1929       continue;
1930     }
1931     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
1932       if (CE->isCast() && CE->getType()->isPointerTy()) {
1933         DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1934           dbgs() << "[Attributor] Use is a constant cast expression, add "
1935                  << CE->getNumUses() << " uses of that expression instead!\n";
1936         });
1937         for (const Use &CEU : CE->uses())
1938           Uses.push_back(&CEU);
1939         continue;
1940       }
1941     }
1942 
1943     AbstractCallSite ACS(&U);
1944     if (!ACS) {
1945       LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
1946                         << " has non call site use " << *U.get() << " in "
1947                         << *U.getUser() << "\n");
1948       // BlockAddress users are allowed.
1949       if (isa<BlockAddress>(U.getUser()))
1950         continue;
1951       return false;
1952     }
1953 
1954     const Use *EffectiveUse =
1955         ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
1956     if (!ACS.isCallee(EffectiveUse)) {
1957       if (!RequireAllCallSites) {
1958         LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1959                           << " is not a call of " << Fn.getName()
1960                           << ", skip use\n");
1961         continue;
1962       }
1963       LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1964                         << " is an invalid use of " << Fn.getName() << "\n");
1965       return false;
1966     }
1967 
1968     // Make sure the arguments that can be matched between the call site and
1969     // the callee agree on their type. It is unlikely they do not, and it
1970     // doesn't make sense for all attributes to know/care about this.
1971     assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
1972     unsigned MinArgsParams =
1973         std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
1974     for (unsigned u = 0; u < MinArgsParams; ++u) {
1975       Value *CSArgOp = ACS.getCallArgOperand(u);
1976       if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
1977         LLVM_DEBUG(
1978             dbgs() << "[Attributor] Call site / callee argument type mismatch ["
1979                    << u << "@" << Fn.getName() << ": "
1980                    << *Fn.getArg(u)->getType() << " vs. "
1981                    << *ACS.getCallArgOperand(u)->getType() << "\n");
1982         return false;
1983       }
1984     }
1985 
1986     if (Pred(ACS))
1987       continue;
1988 
1989     LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
1990                       << *ACS.getInstruction() << "\n");
1991     return false;
1992   }
1993 
1994   return true;
1995 }
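// Illustrative sketch (not part of the original file): requiring knowledge
// of all call sites of an internal function `Fn` before deducing an
// argument property, with `ArgNo`, `A`, and `QueryingAA` assumed in scope:
//
//   bool UsedAssumedInformation = false;
//   auto CallSitePred = [&](AbstractCallSite ACS) {
//     Value *CSArg = ACS.getCallArgOperand(ArgNo);
//     return CSArg && isa<Constant>(CSArg);
//   };
//   if (!A.checkForAllCallSites(CallSitePred, Fn,
//                               /* RequireAllCallSites */ true, &QueryingAA,
//                               UsedAssumedInformation))
//     ; // Not all call sites are known or some call site failed the check.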
1996 
1997 bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
1998   // TODO: Maintain a cache of Values that are on the pathway from an
1999   // Argument to an Instruction that would affect the liveness/return state,
2000   // etc.
2001   return EnableCallSiteSpecific;
2002 }
2003 
2004 bool Attributor::checkForAllReturnedValues(function_ref<bool(Value &)> Pred,
2005                                            const AbstractAttribute &QueryingAA,
2006                                            AA::ValueScope S,
2007                                            bool RecurseForSelectAndPHI) {
2008 
2009   const IRPosition &IRP = QueryingAA.getIRPosition();
2010   const Function *AssociatedFunction = IRP.getAssociatedFunction();
2011   if (!AssociatedFunction)
2012     return false;
2013 
2014   bool UsedAssumedInformation = false;
2015   SmallVector<AA::ValueAndContext> Values;
2016   if (!getAssumedSimplifiedValues(
2017           IRPosition::returned(*AssociatedFunction), &QueryingAA, Values, S,
2018           UsedAssumedInformation, RecurseForSelectAndPHI))
2019     return false;
2020 
2021   return llvm::all_of(Values, [&](const AA::ValueAndContext &VAC) {
2022     return Pred(*VAC.getValue());
2023   });
2024 }
2025 
2026 static bool checkForAllInstructionsImpl(
2027     Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
2028     function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
2029     const AAIsDead *LivenessAA, ArrayRef<unsigned> Opcodes,
2030     bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
2031     bool CheckPotentiallyDead = false) {
2032   for (unsigned Opcode : Opcodes) {
2033     // Check if we have instructions with this opcode at all first.
2034     auto *Insts = OpcodeInstMap.lookup(Opcode);
2035     if (!Insts)
2036       continue;
2037 
2038     for (Instruction *I : *Insts) {
2039       // Skip dead instructions.
2040       if (A && !CheckPotentiallyDead &&
2041           A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
2042                            UsedAssumedInformation, CheckBBLivenessOnly)) {
2043         DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2044                         dbgs() << "[Attributor] Instruction " << *I
2045                                << " is potentially dead, skip!\n";);
2046         continue;
2047       }
2048 
2049       if (!Pred(*I))
2050         return false;
2051     }
2052   }
2053   return true;
2054 }
2055 
2056 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2057                                          const Function *Fn,
2058                                          const AbstractAttribute *QueryingAA,
2059                                          ArrayRef<unsigned> Opcodes,
2060                                          bool &UsedAssumedInformation,
2061                                          bool CheckBBLivenessOnly,
2062                                          bool CheckPotentiallyDead) {
2063   // Since we need to provide instructions we have to have an exact definition.
2064   if (!Fn || Fn->isDeclaration())
2065     return false;
2066 
2067   const IRPosition &QueryIRP = IRPosition::function(*Fn);
2068   const auto *LivenessAA =
2069       CheckPotentiallyDead && QueryingAA
2070           ? (getAAFor<AAIsDead>(*QueryingAA, QueryIRP, DepClassTy::NONE))
2071           : nullptr;
2072 
2073   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2074   if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, QueryingAA,
2075                                    LivenessAA, Opcodes, UsedAssumedInformation,
2076                                    CheckBBLivenessOnly, CheckPotentiallyDead))
2077     return false;
2078 
2079   return true;
2080 }
2081 
2082 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2083                                          const AbstractAttribute &QueryingAA,
2084                                          ArrayRef<unsigned> Opcodes,
2085                                          bool &UsedAssumedInformation,
2086                                          bool CheckBBLivenessOnly,
2087                                          bool CheckPotentiallyDead) {
2088   const IRPosition &IRP = QueryingAA.getIRPosition();
2089   const Function *AssociatedFunction = IRP.getAssociatedFunction();
2090   return checkForAllInstructions(Pred, AssociatedFunction, &QueryingAA, Opcodes,
2091                                  UsedAssumedInformation, CheckBBLivenessOnly,
2092                                  CheckPotentiallyDead);
2093 }
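// Illustrative sketch (not part of the original file): iterating all calls
// and invokes in the associated function of a querying AA:
//
//   bool UsedAssumedInformation = false;
//   auto CallSiteCheck = [&](Instruction &I) {
//     return !cast<CallBase>(I).isIndirectCall();
//   };
//   if (!A.checkForAllInstructions(CallSiteCheck, QueryingAA,
//                                  {(unsigned)Instruction::Call,
//                                   (unsigned)Instruction::Invoke},
//                                  UsedAssumedInformation))
//     ; // An indirect call was found or the function is a declaration.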
2094 
2095 bool Attributor::checkForAllReadWriteInstructions(
2096     function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
2097     bool &UsedAssumedInformation) {
2098   TimeTraceScope TS("checkForAllReadWriteInstructions");
2099 
2100   const Function *AssociatedFunction =
2101       QueryingAA.getIRPosition().getAssociatedFunction();
2102   if (!AssociatedFunction)
2103     return false;
2104 
2105   const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
2106   const auto *LivenessAA =
2107       getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
2108 
2109   for (Instruction *I :
2110        InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
2111     // Skip dead instructions.
2112     if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, LivenessAA,
2113                       UsedAssumedInformation))
2114       continue;
2115 
2116     if (!Pred(*I))
2117       return false;
2118   }
2119 
2120   return true;
2121 }
2122 
2123 void Attributor::runTillFixpoint() {
2124   TimeTraceScope TimeScope("Attributor::runTillFixpoint");
2125   LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
2126                     << DG.SyntheticRoot.Deps.size()
2127                     << " abstract attributes.\n");
2128 
2129   // Now that all abstract attributes are collected and initialized we start
2130   // the abstract analysis.
2131 
2132   unsigned IterationCounter = 1;
2133   unsigned MaxIterations =
2134       Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
2135 
2136   SmallVector<AbstractAttribute *, 32> ChangedAAs;
2137   SetVector<AbstractAttribute *> Worklist, InvalidAAs;
2138   Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
2139 
2140   do {
2141     // Remember the size to determine new attributes.
2142     size_t NumAAs = DG.SyntheticRoot.Deps.size();
2143     LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
2144                       << ", Worklist size: " << Worklist.size() << "\n");
2145 
2146     // For invalid AAs we can fix dependent AAs that have a required dependence,
2147     // thereby folding long dependence chains in a single step without the need
2148     // to run updates.
2149     for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
2150       AbstractAttribute *InvalidAA = InvalidAAs[u];
2151 
2152       // Check the dependences to fast track invalidation.
2153       DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2154                       dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
2155                              << " has " << InvalidAA->Deps.size()
2156                              << " required & optional dependences\n");
2157       for (auto &DepIt : InvalidAA->Deps) {
2158         AbstractAttribute *DepAA = cast<AbstractAttribute>(DepIt.getPointer());
2159         if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
2160           DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2161                           dbgs() << " - recompute: " << *DepAA);
2162           Worklist.insert(DepAA);
2163           continue;
2164         }
2165         DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, dbgs()
2166                                                 << " - invalidate: " << *DepAA);
2167         DepAA->getState().indicatePessimisticFixpoint();
2168         assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
2169         if (!DepAA->getState().isValidState())
2170           InvalidAAs.insert(DepAA);
2171         else
2172           ChangedAAs.push_back(DepAA);
2173       }
2174       InvalidAA->Deps.clear();
2175     }
2176 
2177     // Add all abstract attributes that are potentially dependent on one that
2178     // changed to the work list.
2179     for (AbstractAttribute *ChangedAA : ChangedAAs) {
2180       for (auto &DepIt : ChangedAA->Deps)
2181         Worklist.insert(cast<AbstractAttribute>(DepIt.getPointer()));
2182       ChangedAA->Deps.clear();
2183     }
2184 
2185     LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2186                       << ", Worklist+Dependent size: " << Worklist.size()
2187                       << "\n");
2188 
2189     // Reset the changed and invalid set.
2190     ChangedAAs.clear();
2191     InvalidAAs.clear();
2192 
2193     // Update all abstract attributes in the work list and record the ones that
2194     // changed.
2195     for (AbstractAttribute *AA : Worklist) {
2196       const auto &AAState = AA->getState();
2197       if (!AAState.isAtFixpoint())
2198         if (updateAA(*AA) == ChangeStatus::CHANGED)
2199           ChangedAAs.push_back(AA);
2200 
2201       // Use the InvalidAAs vector to propagate invalid states quickly and
2202       // transitively without requiring updates.
2203       if (!AAState.isValidState())
2204         InvalidAAs.insert(AA);
2205     }
2206 
2207     // Add attributes to the changed set if they have been created in the last
2208     // iteration.
2209     ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
2210                       DG.SyntheticRoot.end());
2211 
2212     // Reset the work list and repopulate with the changed abstract attributes.
2213     // Note that dependent ones are added above.
2214     Worklist.clear();
2215     Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
2216     Worklist.insert(QueryAAsAwaitingUpdate.begin(),
2217                     QueryAAsAwaitingUpdate.end());
2218     QueryAAsAwaitingUpdate.clear();
2219 
2220   } while (!Worklist.empty() && (IterationCounter++ < MaxIterations));
2221 
2222   if (IterationCounter > MaxIterations && !Functions.empty()) {
2223     auto Remark = [&](OptimizationRemarkMissed ORM) {
2224       return ORM << "Attributor did not reach a fixpoint after "
2225                  << ore::NV("Iterations", MaxIterations) << " iterations.";
2226     };
2227     Function *F = Functions.front();
2228     emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
2229   }
2230 
2231   LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2232                     << IterationCounter << "/" << MaxIterations
2233                     << " iterations\n");
2234 
2235   // Reset abstract attributes not settled in a sound fixpoint by now. This
2236   // happens when we stopped the fixpoint iteration early. Note that only the
2237   // ones marked as "changed" *and* the ones transitively depending on them
2238   // need to be reverted to a pessimistic state. Others might not be in a
2239   // fixpoint state but we can use the optimistic results for them anyway.
2240   SmallPtrSet<AbstractAttribute *, 32> Visited;
2241   for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2242     AbstractAttribute *ChangedAA = ChangedAAs[u];
2243     if (!Visited.insert(ChangedAA).second)
2244       continue;
2245 
2246     AbstractState &State = ChangedAA->getState();
2247     if (!State.isAtFixpoint()) {
2248       State.indicatePessimisticFixpoint();
2249 
2250       NumAttributesTimedOut++;
2251     }
2252 
2253     for (auto &DepIt : ChangedAA->Deps)
2254       ChangedAAs.push_back(cast<AbstractAttribute>(DepIt.getPointer()));
2255     ChangedAA->Deps.clear();
2256   }
2257 
2258   LLVM_DEBUG({
2259     if (!Visited.empty())
2260       dbgs() << "\n[Attributor] Finalized " << Visited.size()
2261              << " abstract attributes.\n";
2262   });
2263 }
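// Conceptually (an illustrative reduction, not the exact code above), the
// fixpoint loop amounts to:
//
//   Worklist = all abstract attributes;
//   while (!Worklist.empty() && IterationCounter++ < MaxIterations) {
//     Changed = { AA in Worklist : updateAA(*AA) == CHANGED };
//     Worklist = dependents(Changed) + newly created AAs + query AAs;
//   }
//   // Anything still changing when the budget runs out is forced into a
//   // pessimistic fixpoint afterwards.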
2264 
2265 void Attributor::registerForUpdate(AbstractAttribute &AA) {
2266   assert(AA.isQueryAA() &&
2267          "Non-query AAs should not be required to register for updates!");
2268   QueryAAsAwaitingUpdate.insert(&AA);
2269 }
2270 
2271 ChangeStatus Attributor::manifestAttributes() {
2272   TimeTraceScope TimeScope("Attributor::manifestAttributes");
2273   size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2274 
2275   unsigned NumManifested = 0;
2276   unsigned NumAtFixpoint = 0;
2277   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2278   for (auto &DepAA : DG.SyntheticRoot.Deps) {
2279     AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
2280     AbstractState &State = AA->getState();
2281 
2282     // If a fixpoint was not reached already, we can now take the optimistic
2283     // state. This is correct because we enforced a pessimistic one on
2284     // abstract attributes that were transitively dependent on a changed one
2285     // already above.
2286     if (!State.isAtFixpoint())
2287       State.indicateOptimisticFixpoint();
2288 
2289     // We must not manifest Attributes that use CallBase info.
2290     if (AA->hasCallBaseContext())
2291       continue;
2292     // If the state is invalid, we do not try to manifest it.
2293     if (!State.isValidState())
2294       continue;
2295 
2296     if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
2297       continue;
2298 
2299     // Skip dead code.
2300     bool UsedAssumedInformation = false;
2301     if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
2302                       /* CheckBBLivenessOnly */ true))
2303       continue;
2304     // Check the manifest debug counter that allows skipping the
2305     // manifestation of AAs.
2306     if (!DebugCounter::shouldExecute(ManifestDBGCounter))
2307       continue;
2308     // Manifest the state and record if we changed the IR.
2309     ChangeStatus LocalChange = AA->manifest(*this);
2310     if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2311       AA->trackStatistics();
2312     LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2313                       << "\n");
2314 
2315     ManifestChange = ManifestChange | LocalChange;
2316 
2317     NumAtFixpoint++;
2318     NumManifested += (LocalChange == ChangeStatus::CHANGED);
2319   }
2320 
2321   (void)NumManifested;
2322   (void)NumAtFixpoint;
2323   LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2324                     << " arguments while " << NumAtFixpoint
2325                     << " were in a valid fixpoint state\n");
2326 
2327   NumAttributesManifested += NumManifested;
2328   NumAttributesValidFixpoint += NumAtFixpoint;
2329 
2330   (void)NumFinalAAs;
2331   if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
2332     auto DepIt = DG.SyntheticRoot.Deps.begin();
2333     for (unsigned u = 0; u < NumFinalAAs; ++u)
2334       ++DepIt;
2335     for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
2336          ++u, ++DepIt) {
2337       errs() << "Unexpected abstract attribute: "
2338              << cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
2339              << cast<AbstractAttribute>(DepIt->getPointer())
2340                     ->getIRPosition()
2341                     .getAssociatedValue()
2342              << "\n";
2343     }
2344     llvm_unreachable("Expected the final number of abstract attributes to "
2345                      "remain unchanged!");
2346   }
2347 
2348   for (auto &It : AttrsMap) {
2349     AttributeList &AL = It.getSecond();
2350     const IRPosition &IRP =
2351         isa<Function>(It.getFirst())
2352             ? IRPosition::function(*cast<Function>(It.getFirst()))
2353             : IRPosition::callsite_function(*cast<CallBase>(It.getFirst()));
2354     IRP.setAttrList(AL);
2355   }
2356 
2357   return ManifestChange;
2358 }
2359 
2360 void Attributor::identifyDeadInternalFunctions() {
2361   // Early exit if we don't intend to delete functions.
2362   if (!Configuration.DeleteFns)
2363     return;
2364 
2365   // To avoid triggering an assertion in the lazy call graph we will not delete
2366   // any internal library functions. We should modify the assertion though and
2367   // allow internals to be deleted.
2368   const auto *TLI =
2369       isModulePass()
2370           ? nullptr
2371           : getInfoCache().getTargetLibraryInfoForFunction(*Functions.back());
2372   LibFunc LF;
2373 
2374   // Identify dead internal functions and delete them. This happens outside
2375   // the other fixpoint analysis as we might treat potentially dead functions
2376   // as live to lower the number of iterations. If they happen to be dead, the
2377   // below fixpoint loop will identify and eliminate them.
2378 
2379   SmallVector<Function *, 8> InternalFns;
2380   for (Function *F : Functions)
2381     if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
2382       InternalFns.push_back(F);
2383 
2384   SmallPtrSet<Function *, 8> LiveInternalFns;
2385   bool FoundLiveInternal = true;
2386   while (FoundLiveInternal) {
2387     FoundLiveInternal = false;
2388     for (Function *&F : InternalFns) {
2389       if (!F)
2390         continue;
2391 
2392       bool UsedAssumedInformation = false;
2393       if (checkForAllCallSites(
2394               [&](AbstractCallSite ACS) {
2395                 Function *Callee = ACS.getInstruction()->getFunction();
2396                 return ToBeDeletedFunctions.count(Callee) ||
2397                        (Functions.count(Callee) && Callee->hasLocalLinkage() &&
2398                         !LiveInternalFns.count(Callee));
2399               },
2400               *F, true, nullptr, UsedAssumedInformation)) {
2401         continue;
2402       }
2403 
2404       LiveInternalFns.insert(F);
2405       F = nullptr;
2406       FoundLiveInternal = true;
2407     }
2408   }
2409 
2410   for (Function *F : InternalFns)
2411     if (F)
2412       ToBeDeletedFunctions.insert(F);
2413 }
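// Illustrative example (not part of the original file): given internal
// functions @a and @b where @a is only called from @b and vice versa, no
// iteration of the loop above can mark either one live, so both end up in
// ToBeDeletedFunctions once the loop stabilizes.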
2414 
2415 ChangeStatus Attributor::cleanupIR() {
2416   TimeTraceScope TimeScope("Attributor::cleanupIR");
2417   // Delete stuff at the end to avoid invalid references and to keep a nice order.
2418   LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2419                     << ToBeDeletedFunctions.size() << " functions and "
2420                     << ToBeDeletedBlocks.size() << " blocks and "
2421                     << ToBeDeletedInsts.size() << " instructions and "
2422                     << ToBeChangedValues.size() << " values and "
2423                     << ToBeChangedUses.size() << " uses. To insert "
2424                     << ToBeChangedToUnreachableInsts.size()
2425                     << " unreachables.\n"
2426                     << "Preserve manifest added " << ManifestAddedBlocks.size()
2427                     << " blocks\n");
2428 
2429   SmallVector<WeakTrackingVH, 32> DeadInsts;
2430   SmallVector<Instruction *, 32> TerminatorsToFold;
2431 
2432   auto ReplaceUse = [&](Use *U, Value *NewV) {
2433     Value *OldV = U->get();
2434 
2435     // If we plan to replace NewV we need to update it at this point.
2436     do {
2437       const auto &Entry = ToBeChangedValues.lookup(NewV);
2438       if (!get<0>(Entry))
2439         break;
2440       NewV = get<0>(Entry);
2441     } while (true);
2442 
2443     Instruction *I = dyn_cast<Instruction>(U->getUser());
2444     assert((!I || isRunOn(*I->getFunction())) &&
2445            "Cannot replace an instruction outside the current SCC!");
2446 
2447     // Do not replace uses in returns if the value is a must-tail call we will
2448     // not delete.
2449     if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2450       if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2451         if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2452           return;
2453       // If we rewrite a return and the new value is not an argument, strip the
2454       // `returned` attribute as it is wrong now.
2455       if (!isa<Argument>(NewV))
2456         for (auto &Arg : RI->getFunction()->args())
2457           Arg.removeAttr(Attribute::Returned);
2458     }
2459 
2460     LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2461                       << " instead of " << *OldV << "\n");
2462     U->set(NewV);
2463 
2464     if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2465       CGModifiedFunctions.insert(I->getFunction());
2466       if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2467           isInstructionTriviallyDead(I))
2468         DeadInsts.push_back(I);
2469     }
2470     if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2471       auto *CB = cast<CallBase>(U->getUser());
2472       if (CB->isArgOperand(U)) {
2473         unsigned Idx = CB->getArgOperandNo(U);
2474         CB->removeParamAttr(Idx, Attribute::NoUndef);
2475         auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
2476         if (Callee && Callee->arg_size() > Idx)
2477           Callee->removeParamAttr(Idx, Attribute::NoUndef);
2478       }
2479     }
2480     if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
2481       Instruction *UserI = cast<Instruction>(U->getUser());
2482       if (isa<UndefValue>(NewV)) {
2483         ToBeChangedToUnreachableInsts.insert(UserI);
2484       } else {
2485         TerminatorsToFold.push_back(UserI);
2486       }
2487     }
2488   };
2489 
2490   for (auto &It : ToBeChangedUses) {
2491     Use *U = It.first;
2492     Value *NewV = It.second;
2493     ReplaceUse(U, NewV);
2494   }
2495 
2496   SmallVector<Use *, 4> Uses;
2497   for (auto &It : ToBeChangedValues) {
2498     Value *OldV = It.first;
2499     auto [NewV, Done] = It.second;
2500     Uses.clear();
2501     for (auto &U : OldV->uses())
2502       if (Done || !U.getUser()->isDroppable())
2503         Uses.push_back(&U);
2504     for (Use *U : Uses) {
2505       if (auto *I = dyn_cast<Instruction>(U->getUser()))
2506         if (!isRunOn(*I->getFunction()))
2507           continue;
2508       ReplaceUse(U, NewV);
2509     }
2510   }
2511 
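  // Invokes with a dead successor can be simplified: if the unwind destination
  // is dead (nounwind) the invoke may become a plain call, and a dead normal
  // destination (noreturn) is cut off with an unreachable.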
2512   for (const auto &V : InvokeWithDeadSuccessor)
2513     if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2514       assert(isRunOn(*II->getFunction()) &&
2515              "Cannot replace an invoke outside the current SCC!");
2516       bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2517       bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2518       bool Invoke2CallAllowed =
2519           !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
2520       assert((UnwindBBIsDead || NormalBBIsDead) &&
2521              "Invoke does not have dead successors!");
2522       BasicBlock *BB = II->getParent();
2523       BasicBlock *NormalDestBB = II->getNormalDest();
2524       if (UnwindBBIsDead) {
2525         Instruction *NormalNextIP = &NormalDestBB->front();
2526         if (Invoke2CallAllowed) {
2527           changeToCall(II);
2528           NormalNextIP = BB->getTerminator();
2529         }
2530         if (NormalBBIsDead)
2531           ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2532       } else {
2533         assert(NormalBBIsDead && "Broken invariant!");
2534         if (!NormalDestBB->getUniquePredecessor())
2535           NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2536         ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2537       }
2538     }
2539   for (Instruction *I : TerminatorsToFold) {
2540     assert(isRunOn(*I->getFunction()) &&
2541            "Cannot replace a terminator outside the current SCC!");
2542     CGModifiedFunctions.insert(I->getFunction());
2543     ConstantFoldTerminator(I->getParent());
2544   }
2545   for (const auto &V : ToBeChangedToUnreachableInsts)
2546     if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2547       LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2548                         << "\n");
2549       assert(isRunOn(*I->getFunction()) &&
2550              "Cannot replace an instruction outside the current SCC!");
2551       CGModifiedFunctions.insert(I->getFunction());
2552       changeToUnreachable(I);
2553     }
2554 
2555   for (const auto &V : ToBeDeletedInsts) {
2556     if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2557       assert((!isa<CallBase>(I) || isa<IntrinsicInst>(I) ||
2558               isRunOn(*I->getFunction())) &&
2559              "Cannot delete an instruction outside the current SCC!");
2560       I->dropDroppableUses();
2561       CGModifiedFunctions.insert(I->getFunction());
2562       if (!I->getType()->isVoidTy())
2563         I->replaceAllUsesWith(UndefValue::get(I->getType()));
2564       if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
2565         DeadInsts.push_back(I);
2566       else
2567         I->eraseFromParent();
2568     }
2569   }
2570 
2571   llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2572 
2573   LLVM_DEBUG({
2574     dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2575     for (auto &I : DeadInsts)
2576       if (I)
2577         dbgs() << "  - " << *I << "\n";
2578   });
2579 
2580   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
2581 
2582   if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2583     SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2584     ToBeDeletedBBs.reserve(NumDeadBlocks);
2585     for (BasicBlock *BB : ToBeDeletedBlocks) {
2586       assert(isRunOn(*BB->getParent()) &&
2587              "Cannot delete a block outside the current SCC!");
2588       CGModifiedFunctions.insert(BB->getParent());
2589       // Do not delete BBs added during manifests of AAs.
2590       if (ManifestAddedBlocks.contains(BB))
2591         continue;
2592       ToBeDeletedBBs.push_back(BB);
2593     }
2594     // We do not actually delete the blocks but squash them into a single
2595     // unreachable; untangling the branches that jump here is something we
2596     // need to do in a more generic way.
2597     detachDeadBlocks(ToBeDeletedBBs, nullptr);
2598   }
2599 
2600   identifyDeadInternalFunctions();
2601 
2602   // Rewrite the functions as requested during manifest.
2603   ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2604 
2605   for (Function *Fn : CGModifiedFunctions)
2606     if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2607       Configuration.CGUpdater.reanalyzeFunction(*Fn);
2608 
2609   for (Function *Fn : ToBeDeletedFunctions) {
2610     if (!Functions.count(Fn))
2611       continue;
2612     Configuration.CGUpdater.removeFunction(*Fn);
2613   }
2614 
2615   if (!ToBeChangedUses.empty())
2616     ManifestChange = ChangeStatus::CHANGED;
2617 
2618   if (!ToBeChangedToUnreachableInsts.empty())
2619     ManifestChange = ChangeStatus::CHANGED;
2620 
2621   if (!ToBeDeletedFunctions.empty())
2622     ManifestChange = ChangeStatus::CHANGED;
2623 
2624   if (!ToBeDeletedBlocks.empty())
2625     ManifestChange = ChangeStatus::CHANGED;
2626 
2627   if (!ToBeDeletedInsts.empty())
2628     ManifestChange = ChangeStatus::CHANGED;
2629 
2630   if (!InvokeWithDeadSuccessor.empty())
2631     ManifestChange = ChangeStatus::CHANGED;
2632 
2633   if (!DeadInsts.empty())
2634     ManifestChange = ChangeStatus::CHANGED;
2635 
2636   NumFnDeleted += ToBeDeletedFunctions.size();
2637 
2638   LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2639                     << " functions after manifest.\n");
2640 
2641 #ifdef EXPENSIVE_CHECKS
2642   for (Function *F : Functions) {
2643     if (ToBeDeletedFunctions.count(F))
2644       continue;
2645     assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2646   }
2647 #endif
2648 
2649   return ManifestChange;
2650 }
2651 
2652 ChangeStatus Attributor::run() {
2653   TimeTraceScope TimeScope("Attributor::run");
2654   AttributorCallGraph ACallGraph(*this);
2655 
2656   if (PrintCallGraph)
2657     ACallGraph.populateAll();
2658 
2659   Phase = AttributorPhase::UPDATE;
2660   runTillFixpoint();
2661 
2662   // dump graphs on demand
2663   if (DumpDepGraph)
2664     DG.dumpGraph();
2665 
2666   if (ViewDepGraph)
2667     DG.viewGraph();
2668 
2669   if (PrintDependencies)
2670     DG.print();
2671 
2672   Phase = AttributorPhase::MANIFEST;
2673   ChangeStatus ManifestChange = manifestAttributes();
2674 
2675   Phase = AttributorPhase::CLEANUP;
2676   ChangeStatus CleanupChange = cleanupIR();
2677 
2678   if (PrintCallGraph)
2679     ACallGraph.print();
2680 
2681   return ManifestChange | CleanupChange;
2682 }
2683 
2684 ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2685   TimeTraceScope TimeScope("updateAA", [&]() {
2686     return AA.getName() + std::to_string(AA.getIRPosition().getPositionKind());
2687   });
2688   assert(Phase == AttributorPhase::UPDATE &&
2689          "We can update AA only in the update stage!");
2690 
2691   // Use a new dependence vector for this update.
2692   DependenceVector DV;
2693   DependenceStack.push_back(&DV);
2694 
2695   auto &AAState = AA.getState();
2696   ChangeStatus CS = ChangeStatus::UNCHANGED;
2697   bool UsedAssumedInformation = false;
2698   if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2699                      /* CheckBBLivenessOnly */ true))
2700     CS = AA.update(*this);
2701 
2702   if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2703     // If the AA did not rely on outside information but changed, we run it
2704     // again to see if it found a fixpoint. Most AAs do but we don't require
2705     // them to. Hence, it might take the AA multiple iterations to get to a
2706     // fixpoint even if it does not rely on outside information, which is fine.
2707     ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
2708     if (CS == ChangeStatus::CHANGED)
2709       RerunCS = AA.update(*this);
2710 
2711     // If the attribute did not change during the run or rerun, and it still did
2712     // not query any non-fix information, the state will not change and we can
2713     // indicate that right at this point.
2714     if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2715       AAState.indicateOptimisticFixpoint();
2716   }
2717 
2718   if (!AAState.isAtFixpoint())
2719     rememberDependences();
2720 
2721   // Verify the stack was used properly, that is, we pop the dependence vector
2722   // we put there earlier.
2723   DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2724   (void)PoppedDV;
2725   assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2726 
2727   return CS;
2728 }
2729 
2730 void Attributor::createShallowWrapper(Function &F) {
2731   assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2732 
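  // Conceptually, for `int foo(int)` the result looks like (sketch, not
  // actual IR):
  //   internal int <anon>(int)  { ...original body of foo... }
  //   int foo(int x) { return <anon>(x); }  // noinline tail call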
2733   Module &M = *F.getParent();
2734   LLVMContext &Ctx = M.getContext();
2735   FunctionType *FnTy = F.getFunctionType();
2736 
2737   Function *Wrapper =
2738       Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2739   F.setName(""); // Make the wrapped function anonymous.
2740   M.getFunctionList().insert(F.getIterator(), Wrapper);
2741   // Flag whether the function is using new-debug-info or not.
2742   Wrapper->IsNewDbgInfoFormat = M.IsNewDbgInfoFormat;
2743 
2744   F.setLinkage(GlobalValue::InternalLinkage);
2745 
2746   F.replaceAllUsesWith(Wrapper);
2747   assert(F.use_empty() && "Uses remained after wrapper was created!");
2748 
2749   // Move the COMDAT section to the wrapper.
2750   // TODO: Check if we need to keep it for F as well.
2751   Wrapper->setComdat(F.getComdat());
2752   F.setComdat(nullptr);
2753 
2754   // Copy all metadata and attributes but keep them on F as well.
2755   SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2756   F.getAllMetadata(MDs);
2757   for (auto MDIt : MDs)
2758     Wrapper->addMetadata(MDIt.first, *MDIt.second);
2759   Wrapper->setAttributes(F.getAttributes());
2760 
2761   // Create the call in the wrapper.
2762   BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2763 
2764   SmallVector<Value *, 8> Args;
2765   Argument *FArgIt = F.arg_begin();
2766   for (Argument &Arg : Wrapper->args()) {
2767     Args.push_back(&Arg);
2768     Arg.setName((FArgIt++)->getName());
2769   }
2770 
2771   CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2772   CI->setTailCall(true);
2773   CI->addFnAttr(Attribute::NoInline);
2774   ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2775 
2776   NumFnShallowWrappersCreated++;
2777 }
2778 
2779 bool Attributor::isInternalizable(Function &F) {
2780   if (F.isDeclaration() || F.hasLocalLinkage() ||
2781       GlobalValue::isInterposableLinkage(F.getLinkage()))
2782     return false;
2783   return true;
2784 }
2785 
2786 Function *Attributor::internalizeFunction(Function &F, bool Force) {
2787   if (!AllowDeepWrapper && !Force)
2788     return nullptr;
2789   if (!isInternalizable(F))
2790     return nullptr;
2791 
2792   SmallPtrSet<Function *, 2> FnSet = {&F};
2793   DenseMap<Function *, Function *> InternalizedFns;
2794   internalizeFunctions(FnSet, InternalizedFns);
2795 
2796   return InternalizedFns[&F];
2797 }
2798 
2799 bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2800                                       DenseMap<Function *, Function *> &FnMap) {
2801   for (Function *F : FnSet)
2802     if (!Attributor::isInternalizable(*F))
2803       return false;
2804 
2805   FnMap.clear();
2806   // Generate the internalized version of each function.
2807   for (Function *F : FnSet) {
2808     Module &M = *F->getParent();
2809     FunctionType *FnTy = F->getFunctionType();
2810 
2811     // Create a copy of the current function
2812     Function *Copied =
2813         Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2814                          F->getName() + ".internalized");
2815     ValueToValueMapTy VMap;
2816     auto *NewFArgIt = Copied->arg_begin();
2817     for (auto &Arg : F->args()) {
2818       auto ArgName = Arg.getName();
2819       NewFArgIt->setName(ArgName);
2820       VMap[&Arg] = &(*NewFArgIt++);
2821     }
2822     SmallVector<ReturnInst *, 8> Returns;
2823     // Flag whether the function is using new-debug-info or not.
2824     Copied->IsNewDbgInfoFormat = F->IsNewDbgInfoFormat;
2825 
2826     // Copy the body of the original function to the new one
2827     CloneFunctionInto(Copied, F, VMap,
2828                       CloneFunctionChangeType::LocalChangesOnly, Returns);
2829 
2830     // Set the linkage and visibility late as CloneFunctionInto has some
2831     // implicit requirements.
2832     Copied->setVisibility(GlobalValue::DefaultVisibility);
2833     Copied->setLinkage(GlobalValue::PrivateLinkage);
2834 
2835     // Copy metadata
2836     SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2837     F->getAllMetadata(MDs);
2838     for (auto MDIt : MDs)
2839       if (!Copied->hasMetadata())
2840         Copied->addMetadata(MDIt.first, *MDIt.second);
2841 
2842     M.getFunctionList().insert(F->getIterator(), Copied);
2843     Copied->setDSOLocal(true);
2844     FnMap[F] = Copied;
2845   }
2846 
2847   // Replace all uses of the old function with the new internalized function
2848   // unless the caller is a function that was just internalized.
2849   for (Function *F : FnSet) {
2850     auto &InternalizedFn = FnMap[F];
2851     auto IsNotInternalized = [&](Use &U) -> bool {
2852       if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2853         return !FnMap.lookup(CB->getCaller());
2854       return false;
2855     };
2856     F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2857   }
2858 
2859   return true;
2860 }
2861 
2862 bool Attributor::isValidFunctionSignatureRewrite(
2863     Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2864 
2865   if (!Configuration.RewriteSignatures)
2866     return false;
2867 
2868   Function *Fn = Arg.getParent();
2869   auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2870     // Forbid call sites that cast the function return type. If we wanted to
2871     // rewrite such functions we would need to re-create a cast at the new
2872     // call site (if the old one had uses).
2873     if (!ACS.getCalledFunction() ||
2874         ACS.getInstruction()->getType() !=
2875             ACS.getCalledFunction()->getReturnType())
2876       return false;
2877     if (cast<CallBase>(ACS.getInstruction())->getCalledOperand()->getType() !=
2878         Fn->getType())
2879       return false;
2880     if (ACS.getNumArgOperands() != Fn->arg_size())
2881       return false;
2882     // Forbid must-tail calls for now.
2883     return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2884   };
2885 
2886   // Avoid var-arg functions for now.
2887   if (Fn->isVarArg()) {
2888     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2889     return false;
2890   }
2891 
2892   // Avoid functions with complicated argument passing semantics.
2893   AttributeList FnAttributeList = Fn->getAttributes();
2894   if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2895       FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2896       FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2897       FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2898     LLVM_DEBUG(
2899         dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2900     return false;
2901   }
2902 
2903   // Avoid callbacks for now.
2904   bool UsedAssumedInformation = false;
2905   if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2906                             UsedAssumedInformation,
2907                             /* CheckPotentiallyDead */ true)) {
2908     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2909     return false;
2910   }
2911 
2912   auto InstPred = [](Instruction &I) {
2913     if (auto *CI = dyn_cast<CallInst>(&I))
2914       return !CI->isMustTailCall();
2915     return true;
2916   };
2917 
2918   // Forbid must-tail calls for now.
2919   // TODO: Support rewriting functions that contain must-tail calls.
2920   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2921   if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2922                                    nullptr, {Instruction::Call},
2923                                    UsedAssumedInformation)) {
2924     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2925     return false;
2926   }
2927 
2928   return true;
2929 }
2930 
2931 bool Attributor::registerFunctionSignatureRewrite(
2932     Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2933     ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
2934     ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
2935   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2936                     << Arg.getParent()->getName() << " with "
2937                     << ReplacementTypes.size() << " replacements\n");
2938   assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2939          "Cannot register an invalid rewrite");
2940 
2941   Function *Fn = Arg.getParent();
2942   SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2943       ArgumentReplacementMap[Fn];
2944   if (ARIs.empty())
2945     ARIs.resize(Fn->arg_size());
2946 
2947   // If we already have a replacement with an equal or smaller number of new
2948   // arguments, ignore this request.
2949   std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2950   if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2951     LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2952     return false;
2953   }
2954 
2955   // If we have a replacement already but we like the new one better, delete
2956   // the old.
2957   ARI.reset();
2958 
2959   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2960                     << Arg.getParent()->getName() << " with "
2961                     << ReplacementTypes.size() << " replacements\n");
2962 
2963   // Remember the replacement.
2964   ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2965                                         std::move(CalleeRepairCB),
2966                                         std::move(ACSRepairCB)));
2967 
2968   return true;
2969 }
2970 
2971 bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2972   bool Result = true;
2973 #ifndef NDEBUG
2974   if (SeedAllowList.size() != 0)
2975     Result = llvm::is_contained(SeedAllowList, AA.getName());
2976   Function *Fn = AA.getAnchorScope();
2977   if (FunctionSeedAllowList.size() != 0 && Fn)
2978     Result &= llvm::is_contained(FunctionSeedAllowList, Fn->getName());
2979 #endif
2980   return Result;
2981 }
2982 
2983 ChangeStatus Attributor::rewriteFunctionSignatures(
2984     SmallSetVector<Function *, 8> &ModifiedFns) {
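  // For each function with registered argument replacements: build a new
  // function with the expanded signature, splice the old body into it,
  // recreate all call sites, and rewire arguments and the call graph.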
2985   ChangeStatus Changed = ChangeStatus::UNCHANGED;
2986 
2987   for (auto &It : ArgumentReplacementMap) {
2988     Function *OldFn = It.getFirst();
2989 
2990     // Deleted functions do not require rewrites.
2991     if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
2992       continue;
2993 
2994     const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2995         It.getSecond();
2996     assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
2997 
2998     SmallVector<Type *, 16> NewArgumentTypes;
2999     SmallVector<AttributeSet, 16> NewArgumentAttributes;
3000 
3001     // Collect replacement argument types and copy over existing attributes.
3002     AttributeList OldFnAttributeList = OldFn->getAttributes();
3003     for (Argument &Arg : OldFn->args()) {
3004       if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3005               ARIs[Arg.getArgNo()]) {
3006         NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
3007                                 ARI->ReplacementTypes.end());
3008         NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
3009                                      AttributeSet());
3010       } else {
3011         NewArgumentTypes.push_back(Arg.getType());
3012         NewArgumentAttributes.push_back(
3013             OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
3014       }
3015     }
3016 
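    // Track the largest vector argument so that the min-legal-vector-width
    // attribute can be updated for the new function and all of its callers.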
3017     uint64_t LargestVectorWidth = 0;
3018     for (auto *I : NewArgumentTypes)
3019       if (auto *VT = dyn_cast<llvm::VectorType>(I))
3020         LargestVectorWidth =
3021             std::max(LargestVectorWidth,
3022                      VT->getPrimitiveSizeInBits().getKnownMinValue());
3023 
3024     FunctionType *OldFnTy = OldFn->getFunctionType();
3025     Type *RetTy = OldFnTy->getReturnType();
3026 
3027     // Construct the new function type using the new arguments types.
3028     FunctionType *NewFnTy =
3029         FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
3030 
3031     LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
3032                       << "' from " << *OldFn->getFunctionType() << " to "
3033                       << *NewFnTy << "\n");
3034 
3035     // Create the new function body and insert it into the module.
3036     Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
3037                                        OldFn->getAddressSpace(), "");
3038     Functions.insert(NewFn);
3039     OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
3040     NewFn->takeName(OldFn);
3041     NewFn->copyAttributesFrom(OldFn);
3042     // Flag whether the function is using new-debug-info or not.
3043     NewFn->IsNewDbgInfoFormat = OldFn->IsNewDbgInfoFormat;
3044 
3045     // Patch the pointer to LLVM function in debug info descriptor.
3046     NewFn->setSubprogram(OldFn->getSubprogram());
3047     OldFn->setSubprogram(nullptr);
3048 
3049     // Recompute the parameter attributes list based on the new arguments for
3050     // the function.
3051     LLVMContext &Ctx = OldFn->getContext();
3052     NewFn->setAttributes(AttributeList::get(
3053         Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
3054         NewArgumentAttributes));
3055     AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
3056 
3057     // Remove argmem from the memory effects if we have no more pointer
3058     // arguments, or they are readnone.
3059     MemoryEffects ME = NewFn->getMemoryEffects();
3060     int ArgNo = -1;
3061     if (ME.doesAccessArgPointees() && all_of(NewArgumentTypes, [&](Type *T) {
3062           ++ArgNo;
3063           return !T->isPtrOrPtrVectorTy() ||
3064                  NewFn->hasParamAttribute(ArgNo, Attribute::ReadNone);
3065         })) {
3066       NewFn->setMemoryEffects(ME - MemoryEffects::argMemOnly());
3067     }
3068 
3069     // Since we have now created the new function, splice the body of the old
3070     // function right into the new function, leaving the old rotting hulk of the
3071     // function empty.
3072     NewFn->splice(NewFn->begin(), OldFn);
3073 
3074     // Fixup block addresses to reference new function.
3075     SmallVector<BlockAddress *, 8u> BlockAddresses;
3076     for (User *U : OldFn->users())
3077       if (auto *BA = dyn_cast<BlockAddress>(U))
3078         BlockAddresses.push_back(BA);
3079     for (auto *BA : BlockAddresses)
3080       BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));
3081 
3082     // Set of all "call-like" instructions that invoke the old function mapped
3083     // to their new replacements.
3084     SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;
3085 
3086     // Callback to create a new "call-like" instruction for a given one.
3087     auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
3088       CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
3089       const AttributeList &OldCallAttributeList = OldCB->getAttributes();
3090 
3091       // Collect the new argument operands for the replacement call site.
3092       SmallVector<Value *, 16> NewArgOperands;
3093       SmallVector<AttributeSet, 16> NewArgOperandAttributes;
3094       for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
3095         unsigned NewFirstArgNum = NewArgOperands.size();
3096         (void)NewFirstArgNum; // only used inside assert.
3097         if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3098                 ARIs[OldArgNum]) {
3099           if (ARI->ACSRepairCB)
3100             ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
3101           assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
3102                      NewArgOperands.size() &&
3103                  "ACS repair callback did not provide as many operands as new "
3104                  "types were registered!");
3105           // TODO: Expose the attribute set to the ACS repair callback.
3106           NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
3107                                          AttributeSet());
3108         } else {
3109           NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
3110           NewArgOperandAttributes.push_back(
3111               OldCallAttributeList.getParamAttrs(OldArgNum));
3112         }
3113       }
3114 
3115       assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
3116              "Mismatch # argument operands vs. # argument operand attributes!");
3117       assert(NewArgOperands.size() == NewFn->arg_size() &&
3118              "Mismatch # argument operands vs. # function arguments!");
3119 
3120       SmallVector<OperandBundleDef, 4> OperandBundleDefs;
3121       OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
3122 
3123       // Create a new call or invoke instruction to replace the old one.
3124       CallBase *NewCB;
3125       if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
3126         NewCB = InvokeInst::Create(NewFn, II->getNormalDest(),
3127                                    II->getUnwindDest(), NewArgOperands,
3128                                    OperandBundleDefs, "", OldCB->getIterator());
3129       } else {
3130         auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
3131                                        "", OldCB->getIterator());
3132         NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
3133         NewCB = NewCI;
3134       }
3135 
3136       // Copy over various properties and the new attributes.
3137       NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
3138       NewCB->setCallingConv(OldCB->getCallingConv());
3139       NewCB->takeName(OldCB);
3140       NewCB->setAttributes(AttributeList::get(
3141           Ctx, OldCallAttributeList.getFnAttrs(),
3142           OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
3143 
3144       AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
3145                                                     LargestVectorWidth);
3146 
3147       CallSitePairs.push_back({OldCB, NewCB});
3148       return true;
3149     };
3150 
3151     // Use the CallSiteReplacementCreator to create replacement call sites.
3152     bool UsedAssumedInformation = false;
3153     bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
3154                                         true, nullptr, UsedAssumedInformation,
3155                                         /* CheckPotentiallyDead */ true);
3156     (void)Success;
3157     assert(Success && "Assumed call site replacement to succeed!");
3158 
3159     // Rewire the arguments.
3160     Argument *OldFnArgIt = OldFn->arg_begin();
3161     Argument *NewFnArgIt = NewFn->arg_begin();
3162     for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
3163          ++OldArgNum, ++OldFnArgIt) {
3164       if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3165               ARIs[OldArgNum]) {
3166         if (ARI->CalleeRepairCB)
3167           ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
3168         if (ARI->ReplacementTypes.empty())
3169           OldFnArgIt->replaceAllUsesWith(
3170               PoisonValue::get(OldFnArgIt->getType()));
3171         NewFnArgIt += ARI->ReplacementTypes.size();
3172       } else {
3173         NewFnArgIt->takeName(&*OldFnArgIt);
3174         OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
3175         ++NewFnArgIt;
3176       }
3177     }
3178 
3179     // Eliminate the instructions *after* we visited all of them.
3180     for (auto &CallSitePair : CallSitePairs) {
3181       CallBase &OldCB = *CallSitePair.first;
3182       CallBase &NewCB = *CallSitePair.second;
3183       assert(OldCB.getType() == NewCB.getType() &&
3184              "Cannot handle call sites with different types!");
3185       ModifiedFns.insert(OldCB.getFunction());
3186       OldCB.replaceAllUsesWith(&NewCB);
3187       OldCB.eraseFromParent();
3188     }
3189 
3190     // Replace the function in the call graph (if any).
3191     Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
3192 
3193     // If the old function was modified and needed to be reanalyzed, the new one
3194     // does now.
3195     if (ModifiedFns.remove(OldFn))
3196       ModifiedFns.insert(NewFn);
3197 
3198     Changed = ChangeStatus::CHANGED;
3199   }
3200 
3201   return Changed;
3202 }
3203 
3204 void InformationCache::initializeInformationCache(const Function &CF,
3205                                                   FunctionInfo &FI) {
3206   // As we do not modify the function here we can remove the const
3207   // without breaking implicit assumptions. At the end of the day, we could
3208   // initialize the cache eagerly which would look the same to the users.
3209   Function &F = const_cast<Function &>(CF);
3210 
3211   // Walk all instructions to find interesting instructions that might be
3212   // queried by abstract attributes during their initialization or update.
3213   // This has to happen before we create attributes.
3214 
3215   DenseMap<const Value *, std::optional<short>> AssumeUsesMap;
3216 
3217   // Add \p V to the assume uses map which tracks the number of uses outside of
3218   // "visited" assumes. If no outside uses are left the value is added to the
3219   // assume only use vector.
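  // Example: for `%c = icmp eq i64 %x, 0` whose only user is an
  // `llvm.assume(i1 %c)` call, the icmp ends up in AssumeOnlyValues.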
3220   auto AddToAssumeUsesMap = [&](const Value &V) -> void {
3221     SmallVector<const Instruction *> Worklist;
3222     if (auto *I = dyn_cast<Instruction>(&V))
3223       Worklist.push_back(I);
3224     while (!Worklist.empty()) {
3225       const Instruction *I = Worklist.pop_back_val();
3226       std::optional<short> &NumUses = AssumeUsesMap[I];
3227       if (!NumUses)
3228         NumUses = I->getNumUses();
3229       NumUses = *NumUses - /* this assume */ 1;
3230       if (*NumUses != 0)
3231         continue;
3232       AssumeOnlyValues.insert(I);
3233       for (const Value *Op : I->operands())
3234         if (auto *OpI = dyn_cast<Instruction>(Op))
3235           Worklist.push_back(OpI);
3236     }
3237   };
3238 
3239   for (Instruction &I : instructions(&F)) {
3240     bool IsInterestingOpcode = false;
3241 
3242     // To allow easy access to all instructions in a function with a given
3243     // opcode we store them in the InfoCache. As not all opcodes are interesting
3244     // to concrete attributes we only cache the ones identified as such in
3245     // the following switch.
3247     switch (I.getOpcode()) {
3248     default:
3249       assert(!isa<CallBase>(&I) &&
3250              "New call base instruction type needs to be known in the "
3251              "Attributor.");
3252       break;
3253     case Instruction::Call:
3254       // Calls are interesting on their own, additionally:
3255       // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
3256       // For `must-tail` calls we remember the caller and callee.
3257       if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
3258         AssumeOnlyValues.insert(Assume);
3259         fillMapFromAssume(*Assume, KnowledgeMap);
3260         AddToAssumeUsesMap(*Assume->getArgOperand(0));
3261       } else if (cast<CallInst>(I).isMustTailCall()) {
3262         FI.ContainsMustTailCall = true;
3263         if (auto *Callee = dyn_cast_if_present<Function>(
3264                 cast<CallInst>(I).getCalledOperand()))
3265           getFunctionInfo(*Callee).CalledViaMustTail = true;
3266       }
3267       [[fallthrough]];
3268     case Instruction::CallBr:
3269     case Instruction::Invoke:
3270     case Instruction::CleanupRet:
3271     case Instruction::CatchSwitch:
3272     case Instruction::AtomicRMW:
3273     case Instruction::AtomicCmpXchg:
3274     case Instruction::Br:
3275     case Instruction::Resume:
3276     case Instruction::Ret:
3277     case Instruction::Load:
3278       // The alignment of a pointer is interesting for loads.
3279     case Instruction::Store:
3280       // The alignment of a pointer is interesting for stores.
3281     case Instruction::Alloca:
3282     case Instruction::AddrSpaceCast:
3283       IsInterestingOpcode = true;
3284     }
3285     if (IsInterestingOpcode) {
3286       auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
3287       if (!Insts)
3288         Insts = new (Allocator) InstructionVectorTy();
3289       Insts->push_back(&I);
3290     }
3291     if (I.mayReadOrWriteMemory())
3292       FI.RWInsts.push_back(&I);
3293   }
3294 
3295   if (F.hasFnAttribute(Attribute::AlwaysInline) &&
3296       isInlineViable(F).isSuccess())
3297     InlineableFunctions.insert(&F);
3298 }
3299 
3300 InformationCache::FunctionInfo::~FunctionInfo() {
3301   // The instruction vectors are allocated using a BumpPtrAllocator, so we
3302   // need to destroy them manually.
3303   for (auto &It : OpcodeInstMap)
3304     It.getSecond()->~InstructionVectorTy();
3305 }
3306 
3307 const ArrayRef<Function *>
3308 InformationCache::getIndirectlyCallableFunctions(Attributor &A) const {
3309   assert(A.isClosedWorldModule() && "Cannot see all indirect callees!");
3310   return IndirectlyCallableFunctions;
3311 }
3312 
3313 void Attributor::recordDependence(const AbstractAttribute &FromAA,
3314                                   const AbstractAttribute &ToAA,
3315                                   DepClassTy DepClass) {
3316   if (DepClass == DepClassTy::NONE)
3317     return;
3318   // If we are outside of an update, thus before the actual fixpoint iteration
3319   // started (= when we create AAs), we do not track dependences because we will
3320   // put all AAs into the initial worklist anyway.
3321   if (DependenceStack.empty())
3322     return;
3323   if (FromAA.getState().isAtFixpoint())
3324     return;
3325   DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
3326 }
3327 
3328 void Attributor::rememberDependences() {
3329   assert(!DependenceStack.empty() && "No dependences to remember!");
3330 
3331   for (DepInfo &DI : *DependenceStack.back()) {
3332     assert((DI.DepClass == DepClassTy::REQUIRED ||
3333             DI.DepClass == DepClassTy::OPTIONAL) &&
3334            "Expected required or optional dependence (1 bit)!");
3335     auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
3336     DepAAs.insert(AbstractAttribute::DepTy(
3337         const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
3338   }
3339 }
3340 
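// Seed an AA of type \p AAType at \p IRP unless the IR attribute \p AK is
// already present or can already be assumed, honoring the allow-list of the
// configuration.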
3341 template <Attribute::AttrKind AK, typename AAType>
3342 void Attributor::checkAndQueryIRAttr(const IRPosition &IRP,
3343                                      AttributeSet Attrs) {
3344   bool IsKnown;
3345   if (!Attrs.hasAttribute(AK))
3346     if (!Configuration.Allowed || Configuration.Allowed->count(&AAType::ID))
3347       if (!AA::hasAssumedIRAttr<AK>(*this, nullptr, IRP, DepClassTy::NONE,
3348                                     IsKnown))
3349         getOrCreateAAFor<AAType>(IRP);
3350 }
3351 
3352 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
3353   if (!VisitedFunctions.insert(&F).second)
3354     return;
3355   if (F.isDeclaration())
3356     return;
3357 
3358   // In non-module runs we need to look at the call sites of a function to
3359   // determine if it is part of a must-tail call edge. This will influence what
3360   // attributes we can derive.
3361   InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
3362   if (!isModulePass() && !FI.CalledViaMustTail) {
3363     for (const Use &U : F.uses())
3364       if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
3365         if (CB->isCallee(&U) && CB->isMustTailCall())
3366           FI.CalledViaMustTail = true;
3367   }
3368 
3369   IRPosition FPos = IRPosition::function(F);
3370   bool IsIPOAmendable = isFunctionIPOAmendable(F);
3371   auto Attrs = F.getAttributes();
3372   auto FnAttrs = Attrs.getFnAttrs();
3373 
3374   // Check for dead BasicBlocks in every function.
3375   // We need dead instruction detection because we do not want to deal with
3376   // broken IR in which SSA rules do not apply.
3377   getOrCreateAAFor<AAIsDead>(FPos);
3378 
3379   // Every function might contain instructions that cause "undefined
3380   // behavior".
3381   getOrCreateAAFor<AAUndefinedBehavior>(FPos);
3382 
3383   // Every function might be applicable for Heap-To-Stack conversion.
3384   if (EnableHeapToStack)
3385     getOrCreateAAFor<AAHeapToStack>(FPos);
3386 
3387   // Every function might be "must-progress".
3388   checkAndQueryIRAttr<Attribute::MustProgress, AAMustProgress>(FPos, FnAttrs);
3389 
3390   // Every function might be "no-free".
3391   checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);
3392 
3393   // Every function might be "will-return".
3394   checkAndQueryIRAttr<Attribute::WillReturn, AAWillReturn>(FPos, FnAttrs);
3395 
3396   // Every function might be marked "nosync"
3397   checkAndQueryIRAttr<Attribute::NoSync, AANoSync>(FPos, FnAttrs);
3398 
3399   // Everything that is visible from the outside (= function, argument, and
3400   // return positions) cannot be changed if the function is not IPO amendable.
3401   // We can, however, analyse the code inside.
3402   if (IsIPOAmendable) {
3403 
3404     // Every function can be nounwind.
3405     checkAndQueryIRAttr<Attribute::NoUnwind, AANoUnwind>(FPos, FnAttrs);
3406 
3407     // Every function might be "no-return".
3408     checkAndQueryIRAttr<Attribute::NoReturn, AANoReturn>(FPos, FnAttrs);
3409 
3410     // Every function might be "no-recurse".
3411     checkAndQueryIRAttr<Attribute::NoRecurse, AANoRecurse>(FPos, FnAttrs);
3412 
3413     // Every function can be "non-convergent".
3414     if (Attrs.hasFnAttr(Attribute::Convergent))
3415       getOrCreateAAFor<AANonConvergent>(FPos);
3416 
3417     // Every function might be "readnone/readonly/writeonly/...".
3418     getOrCreateAAFor<AAMemoryBehavior>(FPos);
3419 
3420     // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
3421     getOrCreateAAFor<AAMemoryLocation>(FPos);
3422 
3423     // Every function can track active assumptions.
3424     getOrCreateAAFor<AAAssumptionInfo>(FPos);
3425 
3426     // If we're not using a dynamic mode for float, there's nothing worthwhile
3427     // to infer. This misses the edge case denormal-fp-math="dynamic" and
3428     // denormal-fp-math-f32=something, but that likely has no real world use.
3429     DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
3430     if (Mode.Input == DenormalMode::Dynamic ||
3431         Mode.Output == DenormalMode::Dynamic)
3432       getOrCreateAAFor<AADenormalFPMath>(FPos);
3433 
3434     // Return attributes are only appropriate if the return type is non void.
3435     Type *ReturnType = F.getReturnType();
3436     if (!ReturnType->isVoidTy()) {
3437       IRPosition RetPos = IRPosition::returned(F);
3438       AttributeSet RetAttrs = Attrs.getRetAttrs();
3439 
3440       // Every returned value might be dead.
3441       getOrCreateAAFor<AAIsDead>(RetPos);
3442 
3443       // Every function might be simplified.
3444       bool UsedAssumedInformation = false;
3445       getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
3446                            AA::Intraprocedural);
3447 
3448       // Every returned value might be marked noundef.
3449       checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(RetPos, RetAttrs);
3450 
3451       if (ReturnType->isPointerTy()) {
3452 
3453         // Every function with pointer return type might be marked align.
3454         getOrCreateAAFor<AAAlign>(RetPos);
3455 
3456         // Every function with pointer return type might be marked nonnull.
3457         checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(RetPos, RetAttrs);
3458 
3459         // Every function with pointer return type might be marked noalias.
3460         checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(RetPos, RetAttrs);
3461 
3462         // Every function with pointer return type might be marked
3463         // dereferenceable.
3464         getOrCreateAAFor<AADereferenceable>(RetPos);
3465       } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
3466         getOrCreateAAFor<AANoFPClass>(RetPos);
3467       }
3468     }
3469   }
3470 
3471   for (Argument &Arg : F.args()) {
3472     IRPosition ArgPos = IRPosition::argument(Arg);
3473     auto ArgNo = Arg.getArgNo();
3474     AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo);
3475 
3476     if (!IsIPOAmendable) {
3477       if (Arg.getType()->isPointerTy())
3478         // Every argument with pointer type might be marked nofree.
3479         checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3480       continue;
3481     }
3482 
3483     // Every argument might be simplified. We have to go through the
3484     // Attributor interface though as outside AAs can register custom
3485     // simplification callbacks.
3486     bool UsedAssumedInformation = false;
3487     getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
3488                          AA::Intraprocedural);
3489 
3490     // Every argument might be dead.
3491     getOrCreateAAFor<AAIsDead>(ArgPos);
3492 
3493     // Every argument might be marked noundef.
3494     checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(ArgPos, ArgAttrs);
3495 
3496     if (Arg.getType()->isPointerTy()) {
3497       // Every argument with pointer type might be marked nonnull.
3498       checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(ArgPos, ArgAttrs);
3499 
3500       // Every argument with pointer type might be marked noalias.
3501       checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(ArgPos, ArgAttrs);
3502 
3503       // Every argument with pointer type might be marked dereferenceable.
3504       getOrCreateAAFor<AADereferenceable>(ArgPos);
3505 
3506       // Every argument with pointer type might be marked align.
3507       getOrCreateAAFor<AAAlign>(ArgPos);
3508 
3509       // Every argument with pointer type might be marked nocapture.
3510       checkAndQueryIRAttr<Attribute::NoCapture, AANoCapture>(ArgPos, ArgAttrs);
3511 
3512       // Every argument with pointer type might be marked
3513       // "readnone/readonly/writeonly/..."
3514       getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
3515 
3516       // Every argument with pointer type might be marked nofree.
3517       checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3518 
3519       // Every argument with pointer type might be privatizable (or
3520       // promotable)
3521       getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
3522     } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
3523       getOrCreateAAFor<AANoFPClass>(ArgPos);
3524     }
3525   }
3526 
3527   auto CallSitePred = [&](Instruction &I) -> bool {
3528     auto &CB = cast<CallBase>(I);
3529     IRPosition CBInstPos = IRPosition::inst(CB);
3530     IRPosition CBFnPos = IRPosition::callsite_function(CB);
3531 
3532     // Call sites might be dead if they have no side effects and no live
3533     // users. The return value might be dead if there are no live users.
3534     getOrCreateAAFor<AAIsDead>(CBInstPos);
3535 
3536     Function *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
3537     // TODO: Even if the callee is not known now we might be able to simplify
3538     //       the call/callee.
3539     if (!Callee) {
3540       getOrCreateAAFor<AAIndirectCallInfo>(CBFnPos);
3541       return true;
3542     }
3543 
3544     // Every call site can track active assumptions.
3545     getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);
3546 
3547     // Skip declarations except if annotations on their call sites were
3548     // explicitly requested.
3549     if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
3550         !Callee->hasMetadata(LLVMContext::MD_callback))
3551       return true;
3552 
3553     if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
3554       IRPosition CBRetPos = IRPosition::callsite_returned(CB);
3555       bool UsedAssumedInformation = false;
3556       getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
3557                            AA::Intraprocedural);
3558 
3559       if (AttributeFuncs::isNoFPClassCompatibleType(Callee->getReturnType()))
3560         getOrCreateAAFor<AANoFPClass>(CBInstPos);
3561     }
3562 
3563     const AttributeList &CBAttrs = CBFnPos.getAttrList();
3564     for (int I = 0, E = CB.arg_size(); I < E; ++I) {
3565 
3566       IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
3567       AttributeSet CBArgAttrs = CBAttrs.getParamAttrs(I);
3568 
3569       // Every call site argument might be dead.
3570       getOrCreateAAFor<AAIsDead>(CBArgPos);
3571 
3572       // Call site argument might be simplified. We have to go through the
3573       // Attributor interface though as outside AAs can register custom
3574       // simplification callbacks.
3575       bool UsedAssumedInformation = false;
3576       getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
3577                            AA::Intraprocedural);
3578 
3579       // Every call site argument might be marked "noundef".
3580       checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(CBArgPos, CBArgAttrs);
3581 
3582       Type *ArgTy = CB.getArgOperand(I)->getType();
3583 
3584       if (!ArgTy->isPointerTy()) {
3585         if (AttributeFuncs::isNoFPClassCompatibleType(ArgTy))
3586           getOrCreateAAFor<AANoFPClass>(CBArgPos);
3587 
3588         continue;
3589       }
3590 
3591       // Call site argument attribute "non-null".
3592       checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(CBArgPos, CBArgAttrs);
3593 
3594       // Call site argument attribute "nocapture".
3595       checkAndQueryIRAttr<Attribute::NoCapture, AANoCapture>(CBArgPos,
3596                                                              CBArgAttrs);
3597 
3598       // Call site argument attribute "no-alias".
3599       checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(CBArgPos, CBArgAttrs);
3600 
3601       // Call site argument attribute "dereferenceable".
3602       getOrCreateAAFor<AADereferenceable>(CBArgPos);
3603 
3604       // Call site argument attribute "align".
3605       getOrCreateAAFor<AAAlign>(CBArgPos);
3606 
3607       // Call site argument attribute
3608       // "readnone/readonly/writeonly/..."
3609       if (!CBAttrs.hasParamAttr(I, Attribute::ReadNone))
3610         getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);
3611 
3612       // Call site argument attribute "nofree".
3613       checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(CBArgPos, CBArgAttrs);
3614     }
3615     return true;
3616   };
3617 
3618   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
3619   [[maybe_unused]] bool Success;
3620   bool UsedAssumedInformation = false;
3621   Success = checkForAllInstructionsImpl(
3622       nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
3623       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
3624        (unsigned)Instruction::Call},
3625       UsedAssumedInformation);
3626   assert(Success && "Expected the check call to be successful!");
3627 
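  // For loads and stores the alignment and address space of the pointer
  // operand are of interest, as is simplification of the loaded/stored
  // value; stores are additionally checked for being dead.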
3628   auto LoadStorePred = [&](Instruction &I) -> bool {
3629     if (auto *LI = dyn_cast<LoadInst>(&I)) {
3630       getOrCreateAAFor<AAAlign>(IRPosition::value(*LI->getPointerOperand()));
3631       if (SimplifyAllLoads)
3632         getAssumedSimplified(IRPosition::value(I), nullptr,
3633                              UsedAssumedInformation, AA::Intraprocedural);
3634       getOrCreateAAFor<AAAddressSpace>(
3635           IRPosition::value(*LI->getPointerOperand()));
3636     } else {
3637       auto &SI = cast<StoreInst>(I);
3638       getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
3639       getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
3640                            UsedAssumedInformation, AA::Intraprocedural);
3641       getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
3642       getOrCreateAAFor<AAAddressSpace>(
3643           IRPosition::value(*SI.getPointerOperand()));
3644     }
3645     return true;
3646   };
3647   Success = checkForAllInstructionsImpl(
3648       nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
3649       {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
3650       UsedAssumedInformation);
3651   assert(Success && "Expected the check call to be successful!");
3652 
3653   // Seed allocation information for every alloca instruction.
3654   auto AAAllocationInfoPred = [&](Instruction &I) -> bool {
3655     getOrCreateAAFor<AAAllocationInfo>(IRPosition::value(I));
3656     return true;
3657   };
3658 
3659   Success = checkForAllInstructionsImpl(
3660       nullptr, OpcodeInstMap, AAAllocationInfoPred, nullptr, nullptr,
3661       {(unsigned)Instruction::Alloca}, UsedAssumedInformation);
3662   assert(Success && "Expected the check call to be successful!");
3663 }
3664 
3665 bool Attributor::isClosedWorldModule() const {
3666   if (CloseWorldAssumption.getNumOccurrences())
3667     return CloseWorldAssumption;
3668   return isModulePass() && Configuration.IsClosedWorldModule;
3669 }
3670 
3671 /// Helpers to ease debugging through output streams and print calls.
3672 ///
3673 ///{
3674 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
3675   return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
3676 }
3677 
3678 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
3679   switch (AP) {
3680   case IRPosition::IRP_INVALID:
3681     return OS << "inv";
3682   case IRPosition::IRP_FLOAT:
3683     return OS << "flt";
3684   case IRPosition::IRP_RETURNED:
3685     return OS << "fn_ret";
3686   case IRPosition::IRP_CALL_SITE_RETURNED:
3687     return OS << "cs_ret";
3688   case IRPosition::IRP_FUNCTION:
3689     return OS << "fn";
3690   case IRPosition::IRP_CALL_SITE:
3691     return OS << "cs";
3692   case IRPosition::IRP_ARGUMENT:
3693     return OS << "arg";
3694   case IRPosition::IRP_CALL_SITE_ARGUMENT:
3695     return OS << "cs_arg";
3696   }
3697   llvm_unreachable("Unknown attribute position!");
3698 }
3699 
3700 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
3701   const Value &AV = Pos.getAssociatedValue();
3702   OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
3703      << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
3704 
3705   if (Pos.hasCallBaseContext())
3706     OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
3707   return OS << "}";
3708 }
3709 
3710 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
3711   OS << "range-state(" << S.getBitWidth() << ")<";
3712   S.getKnown().print(OS);
3713   OS << " / ";
3714   S.getAssumed().print(OS);
3715   OS << ">";
3716 
3717   return OS << static_cast<const AbstractState &>(S);
3718 }
3719 
3720 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
3721   return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
3722 }
3723 
3724 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
3725   AA.print(OS);
3726   return OS;
3727 }
3728 
3729 raw_ostream &llvm::operator<<(raw_ostream &OS,
3730                               const PotentialConstantIntValuesState &S) {
3731   OS << "set-state(< {";
3732   if (!S.isValidState())
3733     OS << "full-set";
3734   else {
3735     for (const auto &It : S.getAssumedSet())
3736       OS << It << ", ";
3737     if (S.undefIsContained())
3738       OS << "undef ";
3739   }
3740   OS << "} >)";
3741 
3742   return OS;
3743 }
3744 
3745 raw_ostream &llvm::operator<<(raw_ostream &OS,
3746                               const PotentialLLVMValuesState &S) {
3747   OS << "set-state(< {";
3748   if (!S.isValidState())
3749     OS << "full-set";
3750   else {
3751     for (const auto &It : S.getAssumedSet()) {
3752       if (auto *F = dyn_cast<Function>(It.first.getValue()))
3753         OS << "@" << F->getName() << "[" << int(It.second) << "], ";
3754       else
3755         OS << *It.first.getValue() << "[" << int(It.second) << "], ";
3756     }
3757     if (S.undefIsContained())
3758       OS << "undef ";
3759   }
3760   OS << "} >)";
3761 
3762   return OS;
3763 }
3764 
3765 void AbstractAttribute::print(Attributor *A, raw_ostream &OS) const {
3766   OS << "[";
3767   OS << getName();
3768   OS << "] for CtxI ";
3769 
3770   if (auto *I = getCtxI()) {
3771     OS << "'";
3772     I->print(OS);
3773     OS << "'";
3774   } else
3775     OS << "<<null inst>>";
3776 
3777   OS << " at position " << getIRPosition() << " with state " << getAsStr(A)
3778      << '\n';
3779 }
3780 
3781 void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
3782   print(OS);
3783 
3784   for (const auto &DepAA : Deps) {
3785     auto *AA = DepAA.getPointer();
3786     OS << "  updates ";
3787     AA->print(OS);
3788   }
3789 
3790   OS << '\n';
3791 }
3792 
3793 raw_ostream &llvm::operator<<(raw_ostream &OS,
3794                               const AAPointerInfo::Access &Acc) {
3795   OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
3796   if (Acc.getLocalInst() != Acc.getRemoteInst())
3797     OS << " via " << *Acc.getLocalInst();
3798   if (Acc.getContent()) {
3799     if (*Acc.getContent())
3800       OS << " [" << **Acc.getContent() << "]";
3801     else
3802       OS << " [ <unknown> ]";
3803   }
3804   return OS;
3805 }
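// Note on the printer above: Acc.getContent() is an optional. Nothing is
// printed when it is absent; an engaged-but-null content (some value is
// written, but which one could not be determined) prints as "[ <unknown> ]".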
///}

/// ----------------------------------------------------------------------------
///                       Pass (Manager) Boilerplate
/// ----------------------------------------------------------------------------

static bool runAttributorOnFunctions(InformationCache &InfoCache,
                                     SetVector<Function *> &Functions,
                                     AnalysisGetter &AG,
                                     CallGraphUpdater &CGUpdater,
                                     bool DeleteFns, bool IsModulePass) {
  if (Functions.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "[Attributor] Run on module with " << Functions.size()
           << " functions:\n";
    for (Function *Fn : Functions)
      dbgs() << "  - " << Fn->getName() << "\n";
  });

  // Create an Attributor and an initially empty information cache that is
  // filled while we identify default attribute opportunities.
  AttributorConfig AC(CGUpdater);
  AC.IsModulePass = IsModulePass;
  AC.DeleteFns = DeleteFns;

  /// Tracking callback for specialization of indirect calls.
  DenseMap<CallBase *, std::unique_ptr<SmallPtrSet<Function *, 8>>>
      IndirectCalleeTrackingMap;
  if (MaxSpecializationPerCB.getNumOccurrences()) {
    AC.IndirectCalleeSpecializationCallback =
        [&](Attributor &, const AbstractAttribute &AA, CallBase &CB,
            Function &Callee) {
          if (MaxSpecializationPerCB == 0)
            return false;
          auto &Set = IndirectCalleeTrackingMap[&CB];
          if (!Set)
            Set = std::make_unique<SmallPtrSet<Function *, 8>>();
          if (Set->size() >= MaxSpecializationPerCB)
            return Set->contains(&Callee);
          Set->insert(&Callee);
          return true;
        };
  }
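  // The callback above caps how many distinct callees a single call base may
  // be specialized for: once MaxSpecializationPerCB callees have been
  // recorded, only already-seen callees are allowed, and a limit of 0
  // disables specialization entirely.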

  Attributor A(Functions, InfoCache, AC);

  // Create shallow wrappers for all functions that are not IPO amendable.
  if (AllowShallowWrappers)
    for (Function *F : Functions)
      if (!A.isFunctionIPOAmendable(*F))
        Attributor::createShallowWrapper(*F);

  // Internalize non-exact functions.
  // TODO: For now we eagerly internalize functions without calculating the
  //       cost; we need a cost interface to determine whether internalizing
  //       a function is "beneficial".
  if (AllowDeepWrapper) {
    unsigned FunSize = Functions.size();
    for (unsigned u = 0; u < FunSize; u++) {
      Function *F = Functions[u];
      if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
          !GlobalValue::isInterposableLinkage(F->getLinkage())) {
        Function *NewF = Attributor::internalizeFunction(*F);
        assert(NewF && "Could not internalize function.");
        Functions.insert(NewF);

        // Update the call graph.
        CGUpdater.replaceFunctionWith(*F, *NewF);
        for (const Use &U : NewF->uses())
          if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
            auto *CallerF = CB->getCaller();
            CGUpdater.reanalyzeFunction(*CallerF);
          }
      }
    }
  }
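  // Note: the internalization loop above iterates by index over the original
  // size of Functions, so the internalized copies it appends to the set are
  // not themselves revisited.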

  for (Function *F : Functions) {
    if (F->hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // We look at internal functions only on demand, but if any use is not a
    // direct call or lies outside the current set of analyzed functions, we
    // have to do it eagerly.
    if (F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
            const auto *CB = dyn_cast<CallBase>(U.getUser());
            return CB && CB->isCallee(&U) &&
                   Functions.count(const_cast<Function *>(CB->getCaller()));
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(*F);
  }

  ChangeStatus Changed = A.run();

  LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
                    << " functions, result: " << Changed << ".\n");
  return Changed == ChangeStatus::CHANGED;
}

static bool runAttributorLightOnFunctions(InformationCache &InfoCache,
                                          SetVector<Function *> &Functions,
                                          AnalysisGetter &AG,
                                          CallGraphUpdater &CGUpdater,
                                          FunctionAnalysisManager &FAM,
                                          bool IsModulePass) {
  if (Functions.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "[AttributorLight] Run on module with " << Functions.size()
           << " functions:\n";
    for (Function *Fn : Functions)
      dbgs() << "  - " << Fn->getName() << "\n";
  });

  // Create an Attributor and an initially empty information cache that is
  // filled while we identify default attribute opportunities.
  AttributorConfig AC(CGUpdater);
  AC.IsModulePass = IsModulePass;
  AC.DeleteFns = false;
  DenseSet<const char *> Allowed(
      {&AAWillReturn::ID, &AANoUnwind::ID, &AANoRecurse::ID, &AANoSync::ID,
       &AANoFree::ID, &AANoReturn::ID, &AAMemoryLocation::ID,
       &AAMemoryBehavior::ID, &AAUnderlyingObjects::ID, &AANoCapture::ID,
       &AAInterFnReachability::ID, &AAIntraFnReachability::ID, &AACallEdges::ID,
       &AANoFPClass::ID, &AAMustProgress::ID, &AANonNull::ID});
  AC.Allowed = &Allowed;
  AC.UseLiveness = false;
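  // The light variant restricts deduction to the allow-listed abstract
  // attributes above, never deletes functions, and skips liveness reasoning,
  // trading some precision for lower compile time.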

  Attributor A(Functions, InfoCache, AC);

  for (Function *F : Functions) {
    if (F->hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // We look at internal functions only on demand, but if any use is not a
    // direct call or lies outside the current set of analyzed functions, we
    // have to do it eagerly.
    if (AC.UseLiveness && F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
            const auto *CB = dyn_cast<CallBase>(U.getUser());
            return CB && CB->isCallee(&U) &&
                   Functions.count(const_cast<Function *>(CB->getCaller()));
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(*F);
  }

  ChangeStatus Changed = A.run();

  if (Changed == ChangeStatus::CHANGED) {
    // Invalidate analyses for modified functions so that we don't have to
    // invalidate all analyses for all functions in this SCC.
    PreservedAnalyses FuncPA;
    // We haven't changed the CFG for modified functions.
    FuncPA.preserveSet<CFGAnalyses>();
    for (Function *Changed : A.getModifiedFunctions()) {
      FAM.invalidate(*Changed, FuncPA);
      // Also invalidate any direct callers of changed functions since analyses
      // may care about attributes of direct callees. For example, MemorySSA
      // cares about whether or not a call's callee modifies memory and queries
      // that through function attributes.
      for (auto *U : Changed->users()) {
        if (auto *Call = dyn_cast<CallBase>(U)) {
          if (Call->getCalledFunction() == Changed)
            FAM.invalidate(*Call->getFunction(), FuncPA);
        }
      }
    }
  }
  LLVM_DEBUG(dbgs() << "[AttributorLight] Done with " << Functions.size()
                    << " functions, result: " << Changed << ".\n");
  return Changed == ChangeStatus::CHANGED;
}

void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }

void AADepGraph::dumpGraph() {
  static std::atomic<int> CallTimes;
  std::string Prefix;

  if (!DepGraphDotFileNamePrefix.empty())
    Prefix = DepGraphDotFileNamePrefix;
  else
    Prefix = "dep_graph";
  std::string Filename =
      Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";

  outs() << "Dependency graph dump to " << Filename << ".\n";

  std::error_code EC;

  raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
  if (!EC)
    llvm::WriteGraph(File, this);

  CallTimes++;
}
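// With the default prefix, successive calls to dumpGraph produce
// dep_graph_0.dot, dep_graph_1.dot, and so on; the static atomic counter
// keeps the numbering increasing across dumps.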

void AADepGraph::print() {
  for (auto DepAA : SyntheticRoot.Deps)
    cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
}

PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (Function &F : M)
    Functions.insert(&F);

  CallGraphUpdater CGUpdater;
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                               /* DeleteFns */ true, /* IsModulePass */ true)) {
    // FIXME: Think about passes we will preserve and add them here.
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
                                           CGSCCAnalysisManager &AM,
                                           LazyCallGraph &CG,
                                           CGSCCUpdateResult &UR) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (LazyCallGraph::Node &N : C)
    Functions.insert(&N.getFunction());

  if (Functions.empty())
    return PreservedAnalyses::all();

  Module &M = *Functions.back()->getParent();
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                               /* DeleteFns */ false,
                               /* IsModulePass */ false)) {
    // FIXME: Think about passes we will preserve and add them here.
    PreservedAnalyses PA;
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    return PA;
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses AttributorLightPass::run(Module &M,
                                           ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  AnalysisGetter AG(FAM, /* CachedOnly */ true);

  SetVector<Function *> Functions;
  for (Function &F : M)
    Functions.insert(&F);

  CallGraphUpdater CGUpdater;
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
  if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
                                    /* IsModulePass */ true)) {
    PreservedAnalyses PA;
    // We have not added or removed functions.
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    // We already invalidated all relevant function analyses above.
    PA.preserveSet<AllAnalysesOn<Function>>();
    return PA;
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses AttributorLightCGSCCPass::run(LazyCallGraph::SCC &C,
                                                CGSCCAnalysisManager &AM,
                                                LazyCallGraph &CG,
                                                CGSCCUpdateResult &UR) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (LazyCallGraph::Node &N : C)
    Functions.insert(&N.getFunction());

  if (Functions.empty())
    return PreservedAnalyses::all();

  Module &M = *Functions.back()->getParent();
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
  if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
                                    /* IsModulePass */ false)) {
    PreservedAnalyses PA;
    // We have not added or removed functions.
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    // We already invalidated all relevant function analyses above.
    PA.preserveSet<AllAnalysesOn<Function>>();
    return PA;
  }
  return PreservedAnalyses::all();
}

namespace llvm {
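
// GraphTraits and DOTGraphTraits specializations that let the generic
// GraphWriter machinery (used by viewGraph and dumpGraph above) traverse the
// AA dependency graph and label each node with its printed form.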

template <> struct GraphTraits<AADepGraphNode *> {
  using NodeRef = AADepGraphNode *;
  using DepTy = PointerIntPair<AADepGraphNode *, 1>;
  using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;

  static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
  static NodeRef DepGetVal(const DepTy &DT) { return DT.getPointer(); }

  using ChildIteratorType =
      mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
  using ChildEdgeIteratorType = AADepGraphNode::DepSetTy::iterator;

  static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }

  static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
};

template <>
struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
  static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }

  using nodes_iterator =
      mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;

  static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }

  static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
};

template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getNodeLabel(const AADepGraphNode *Node,
                                  const AADepGraph *DG) {
    std::string AAString;
    raw_string_ostream O(AAString);
    Node->print(O);
    return AAString;
  }
};

} // end namespace llvm