//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstdint>
#include <memory>

#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif

#include <cassert>
#include <optional>
#include <string>

using namespace llvm;

#define DEBUG_TYPE "attributor"
#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"

DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
              "Determine what attributes are manifested in the IR");

STATISTIC(NumFnDeleted, "Number of functions deleted");
STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");

// TODO: Determine a good default value.
//
// In the LLVM test suite and SPEC2006, 32 seems not to induce compile-time
// overhead (when run with the first 5 abstract attributes). The results also
// indicate that we never reach 32 iterations but always find a fixpoint
// sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    SetFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));
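// For example (illustrative), the iteration bound can be lowered from the
// opt command line when debugging:
//   opt -passes=attributor -attributor-max-iterations=16 -S in.ll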

static cl::opt<unsigned>
    MaxSpecializationPerCB("attributor-max-specializations-per-call-base",
                           cl::Hidden,
                           cl::desc("Maximal number of callees specialized for "
                                    "a call base"),
                           cl::init(UINT32_MAX));

static cl::opt<unsigned, true> MaxInitializationChainLengthX(
    "attributor-max-initialization-chain-length", cl::Hidden,
    cl::desc(
        "Maximal number of chained initializations (to avoid stack overflows)"),
    cl::location(MaxInitializationChainLength), cl::init(1024));
unsigned llvm::MaxInitializationChainLength;

static cl::opt<bool> AnnotateDeclarationCallSites(
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<bool>
    AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
                         cl::desc("Allow the Attributor to create shallow "
                                  "wrappers for non-exact definitions."),
                         cl::init(false));

static cl::opt<bool>
    AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
                     cl::desc("Allow the Attributor to use IP information "
                              "derived from non-exact functions via cloning"),
                     cl::init(false));

// These options can only be used for debug builds.
#ifndef NDEBUG
static cl::list<std::string>
    SeedAllowList("attributor-seed-allow-list", cl::Hidden,
                  cl::desc("Comma separated list of attribute names that are "
                           "allowed to be seeded."),
                  cl::CommaSeparated);

static cl::list<std::string> FunctionSeedAllowList(
    "attributor-function-seed-allow-list", cl::Hidden,
    cl::desc("Comma separated list of function names that are "
             "allowed to be seeded."),
    cl::CommaSeparated);
#endif

static cl::opt<bool>
    DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
                 cl::desc("Dump the dependency graph to dot files."),
                 cl::init(false));

static cl::opt<std::string> DepGraphDotFileNamePrefix(
    "attributor-depgraph-dot-filename-prefix", cl::Hidden,
152 cl::desc("The prefix used for the CallGraph dot file names."));

static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
                                  cl::desc("View the dependency graph."),
                                  cl::init(false));

static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
                                       cl::desc("Print attribute dependencies"),
                                       cl::init(false));

static cl::opt<bool> EnableCallSiteSpecific(
    "attributor-enable-call-site-specific-deduction", cl::Hidden,
    cl::desc("Allow the Attributor to do call site specific analysis"),
    cl::init(false));

static cl::opt<bool>
    PrintCallGraph("attributor-print-call-graph", cl::Hidden,
                   cl::desc("Print Attributor's internal call graph"),
                   cl::init(false));

static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
                                      cl::Hidden,
                                      cl::desc("Try to simplify all loads."),
                                      cl::init(true));

static cl::opt<bool> CloseWorldAssumption(
    "attributor-assume-closed-world", cl::Hidden,
    cl::desc("Should a closed world be assumed, or not. Default if not set."));

/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::CHANGED ? L : R;
}
ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
  L = L | R;
  return L;
}
ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::UNCHANGED ? L : R;
}
ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
  L = L & R;
  return L;
}
///}
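// Usage sketch (illustrative): CHANGED is sticky under `|`, which is how
// update and manifest results are typically accumulated:
//   ChangeStatus S = ChangeStatus::UNCHANGED;
//   S |= ChangeStatus::CHANGED;   // S is now CHANGED.
//   S |= ChangeStatus::UNCHANGED; // S stays CHANGED.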

bool AA::isGPU(const Module &M) {
  Triple T(M.getTargetTriple());
  return T.isGPU();
}

bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
                      const AbstractAttribute &QueryingAA) {
  // We are looking for volatile instructions or non-relaxed atomics.
  if (const auto *CB = dyn_cast<CallBase>(&I)) {
    if (CB->hasFnAttr(Attribute::NoSync))
      return true;

    // Non-convergent and readnone imply nosync.
    if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
      return true;

    if (AANoSync::isNoSyncIntrinsic(&I))
      return true;

    bool IsKnownNoSync;
    return AA::hasAssumedIRAttr<Attribute::NoSync>(
        A, &QueryingAA, IRPosition::callsite_function(*CB),
        DepClassTy::OPTIONAL, IsKnownNoSync);
  }

  if (!I.mayReadOrWriteMemory())
    return true;

  return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
}
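// For reference (illustrative): under the rules above, a relaxed atomic load
// is considered nosync, while a volatile access or a non-relaxed (e.g.
// seq_cst) atomic is not.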

bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
                             const Value &V, bool ForAnalysisOnly) {
  // TODO: See the AAInstanceInfo class comment.
  if (!ForAnalysisOnly)
    return false;
  auto *InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
      QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
  return InstanceInfoAA && InstanceInfoAA->isAssumedUniqueForAnalysis();
}

Constant *
AA::getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA,
                          Value &Obj, Type &Ty, const TargetLibraryInfo *TLI,
                          const DataLayout &DL, AA::RangeTy *RangePtr) {
  if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
    return Init;
  auto *GV = dyn_cast<GlobalVariable>(&Obj);
  if (!GV)
    return nullptr;

  bool UsedAssumedInformation = false;
  Constant *Initializer = nullptr;
  if (A.hasGlobalVariableSimplificationCallback(*GV)) {
    auto AssumedGV = A.getAssumedInitializerFromCallBack(
        *GV, &QueryingAA, UsedAssumedInformation);
    Initializer = *AssumedGV;
    if (!Initializer)
      return nullptr;
  } else {
    if (!GV->hasLocalLinkage()) {
      // Externally visible global that's either non-constant,
      // or a constant with an uncertain initializer.
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return nullptr;
    }

    // Globals with local linkage are always initialized.
    assert(!GV->hasLocalLinkage() || GV->hasInitializer());

    if (!Initializer)
      Initializer = GV->getInitializer();
  }

  if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
    APInt Offset = APInt(64, RangePtr->Offset);
    return ConstantFoldLoadFromConst(Initializer, &Ty, Offset, DL);
  }

  return ConstantFoldLoadFromUniformValue(Initializer, &Ty, DL);
}

bool AA::isValidInScope(const Value &V, const Function *Scope) {
  if (isa<Constant>(V))
    return true;
  if (auto *I = dyn_cast<Instruction>(&V))
    return I->getFunction() == Scope;
  if (auto *A = dyn_cast<Argument>(&V))
    return A->getParent() == Scope;
  return false;
}

bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
                           InformationCache &InfoCache) {
  if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
    return true;
  const Function *Scope = nullptr;
  const Instruction *CtxI = VAC.getCtxI();
  if (CtxI)
    Scope = CtxI->getFunction();
  if (auto *A = dyn_cast<Argument>(VAC.getValue()))
    return A->getParent() == Scope;
  if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
    if (I->getFunction() == Scope) {
      if (const DominatorTree *DT =
              InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  *Scope))
        return DT->dominates(I, CtxI);
      // Local dominance check mostly for the old PM passes.
      if (CtxI && I->getParent() == CtxI->getParent())
        return llvm::any_of(
            make_range(I->getIterator(), I->getParent()->end()),
            [&](const Instruction &AfterI) { return &AfterI == CtxI; });
    }
  }
  return false;
}

Value *AA::getWithType(Value &V, Type &Ty) {
  if (V.getType() == &Ty)
    return &V;
  if (isa<PoisonValue>(V))
    return PoisonValue::get(&Ty);
  if (isa<UndefValue>(V))
    return UndefValue::get(&Ty);
  if (auto *C = dyn_cast<Constant>(&V)) {
    if (C->isNullValue() && !Ty.isPtrOrPtrVectorTy())
      return Constant::getNullValue(&Ty);
    if (C->getType()->isPointerTy() && Ty.isPointerTy())
      return ConstantExpr::getPointerCast(C, &Ty);
    if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
      if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
        return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
      if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
        return ConstantFoldCastInstruction(Instruction::FPTrunc, C, &Ty);
    }
  }
  return nullptr;
}
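// Example (illustrative): getWithType(i64 42, i32) yields the truncated
// constant i32 42, while requesting an i32 for a pointer-typed constant
// yields nullptr since no pointer-to-integer cast is attempted here.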

std::optional<Value *>
AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
                                         const std::optional<Value *> &B,
                                         Type *Ty) {
  if (A == B)
    return A;
  if (!B)
    return A;
  if (*B == nullptr)
    return nullptr;
  if (!A)
    return Ty ? getWithType(**B, *Ty) : nullptr;
  if (*A == nullptr)
    return nullptr;
  if (!Ty)
    Ty = (*A)->getType();
  if (isa_and_nonnull<UndefValue>(*A))
    return getWithType(**B, *Ty);
  if (isa<UndefValue>(*B))
    return A;
  if (*A && *B && *A == getWithType(**B, *Ty))
    return A;
  return nullptr;
}
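// Semantics sketch (illustrative): in this lattice std::nullopt means "no
// value known yet" and nullptr means "too many values". Combining
// std::nullopt with a value thus yields that value (adjusted to \p Ty),
// while combining two distinct non-undef values collapses to nullptr.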

template <bool IsLoad, typename Ty>
static bool getPotentialCopiesOfMemoryValue(
    Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
    SmallSetVector<Instruction *, 4> *PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
                    << " (only exact: " << OnlyExact << ")\n";);

  Value &Ptr = *I.getPointerOperand();
  // Containers to remember the pointer infos and new copies while we are not
  // sure that we can find all of them. If we abort we want to avoid spurious
  // dependences and potential copies in the provided container.
  SmallVector<const AAPointerInfo *> PIs;
  SmallSetVector<Value *, 8> NewCopies;
  SmallSetVector<Instruction *, 8> NewCopyOrigins;

  const auto *TLI =
      A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());

  auto Pred = [&](Value &Obj) {
    LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
    if (isa<UndefValue>(&Obj))
      return true;
    if (isa<ConstantPointerNull>(&Obj)) {
      // A null pointer access can be undefined but any offset from null may
      // be OK. We do not try to optimize the latter.
      if (!NullPointerIsDefined(I.getFunction(),
                                Ptr.getType()->getPointerAddressSpace()) &&
          A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
                                 AA::Interprocedural) == &Obj)
        return true;
      LLVM_DEBUG(
          dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
      return false;
    }
    // TODO: Use assumed noalias return.
    if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
        !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
      LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
                        << "\n";);
      return false;
    }
    if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
      if (!GV->hasLocalLinkage() &&
          !(GV->isConstant() && GV->hasInitializer())) {
        LLVM_DEBUG(dbgs() << "Underlying object is global with external "
                             "linkage, not supported yet: "
                          << Obj << "\n";);
        return false;
      }

    bool NullOnly = true;
    bool NullRequired = false;
    auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
                                        bool IsExact) {
      if (!V || *V == nullptr)
        NullOnly = false;
      else if (isa<UndefValue>(*V))
        /* No op */;
      else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
        NullRequired = !IsExact;
      else
        NullOnly = false;
    };

    auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
                                      Value &V) {
      Value *AdjV = AA::getWithType(V, *I.getType());
      if (!AdjV) {
        LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
                             "cannot be converted to read type: "
                          << *Acc.getRemoteInst() << " : " << *I.getType()
                          << "\n";);
      }
      return AdjV;
    };

    auto SkipCB = [&](const AAPointerInfo::Access &Acc) {
      if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad) {
        if (Acc.isWrittenValueYetUndetermined())
          return true;
        if (PotentialValueOrigins && !isa<AssumeInst>(Acc.getRemoteInst()))
          return false;
        if (!Acc.isWrittenValueUnknown())
          if (Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue()))
            if (NewCopies.count(V)) {
              NewCopyOrigins.insert(Acc.getRemoteInst());
              return true;
            }
        if (auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst()))
          if (Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand()))
            if (NewCopies.count(V)) {
              NewCopyOrigins.insert(Acc.getRemoteInst());
              return true;
            }
      }
      return false;
    };

    auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
      if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad && Acc.isWrittenValueYetUndetermined())
        return true;
      CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
      if (OnlyExact && !IsExact && !NullOnly &&
          !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
        LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
                          << ", abort!\n");
        return false;
      }
      if (NullRequired && !NullOnly) {
480 LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
481 "one, however found non-null one: "
482 << *Acc.getRemoteInst() << ", abort!\n");
        return false;
      }
      if (IsLoad) {
        assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
        if (!Acc.isWrittenValueUnknown()) {
          Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
          if (!V)
            return false;
          NewCopies.insert(V);
          if (PotentialValueOrigins)
            NewCopyOrigins.insert(Acc.getRemoteInst());
          return true;
        }
        auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
        if (!SI) {
          LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
        if (!V)
          return false;
        NewCopies.insert(V);
        if (PotentialValueOrigins)
          NewCopyOrigins.insert(SI);
      } else {
        assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
        auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
        if (!LI && OnlyExact) {
          LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        NewCopies.insert(Acc.getRemoteInst());
      }
      return true;
    };

    // If the value has been written to we don't need the initial value of the
    // object.
    bool HasBeenWrittenTo = false;

    AA::RangeTy Range;
    auto *PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
                                         DepClassTy::NONE);
    if (!PI || !PI->forallInterferingAccesses(
                   A, QueryingAA, I,
                   /* FindInterferingWrites */ IsLoad,
                   /* FindInterferingReads */ !IsLoad, CheckAccess,
                   HasBeenWrittenTo, Range, SkipCB)) {
      LLVM_DEBUG(
          dbgs()
          << "Failed to verify all interfering accesses for underlying object: "
          << Obj << "\n");
      return false;
    }

    if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
      const DataLayout &DL = A.getDataLayout();
      Value *InitialValue = AA::getInitialValueForObj(
          A, QueryingAA, Obj, *I.getType(), TLI, DL, &Range);
      if (!InitialValue) {
        LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
                             "underlying object, abort!\n");
        return false;
      }
      CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
                             "null or undef, abort!\n");
        return false;
      }

      NewCopies.insert(InitialValue);
      if (PotentialValueOrigins)
        NewCopyOrigins.insert(nullptr);
    }

    PIs.push_back(PI);

    return true;
  };

  const auto *AAUO = A.getAAFor<AAUnderlyingObjects>(
      QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
  if (!AAUO || !AAUO->forallUnderlyingObjects(Pred)) {
    LLVM_DEBUG(
        dbgs() << "Underlying objects stored into could not be determined\n";);
    return false;
  }

  // Only if we were successful collecting all potential copies do we record
  // dependences (on non-fixed AAPointerInfo AAs). We also only then modify
  // the given PotentialCopies container.
  for (const auto *PI : PIs) {
    if (!PI->getState().isAtFixpoint())
      UsedAssumedInformation = true;
    A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
  }
  PotentialCopies.insert_range(NewCopies);
  if (PotentialValueOrigins)
    PotentialValueOrigins->insert_range(NewCopyOrigins);

  return true;
}

bool AA::getPotentiallyLoadedValues(
    Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
      A, LI, PotentialValues, &PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}

bool AA::getPotentialCopiesOfStoredValue(
    Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
      A, SI, PotentialCopies, nullptr, QueryingAA, UsedAssumedInformation,
      OnlyExact);
}
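// Example (illustrative): given
//   store i32 7, ptr %a
//   %v = load i32, ptr %a
// getPotentiallyLoadedValues collects the constant 7 as the only potential
// value of %v (with the store as its origin), provided %a is, e.g., an
// alloca for which all interfering accesses can be verified.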

static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
                                        const AbstractAttribute &QueryingAA,
                                        bool RequireReadNone, bool &IsKnown) {
  if (RequireReadNone) {
    if (AA::hasAssumedIRAttr<Attribute::ReadNone>(
            A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
            /* IgnoreSubsumingPositions */ true))
      return true;
  } else if (AA::hasAssumedIRAttr<Attribute::ReadOnly>(
                 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
                 /* IgnoreSubsumingPositions */ true))
    return true;

  IRPosition::Kind Kind = IRP.getPositionKind();
  if (Kind == IRPosition::IRP_FUNCTION || Kind == IRPosition::IRP_CALL_SITE) {
    const auto *MemLocAA =
        A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
    if (MemLocAA && MemLocAA->isAssumedReadNone()) {
      IsKnown = MemLocAA->isKnownReadNone();
      if (!IsKnown)
        A.recordDependence(*MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
      return true;
    }
  }

  const auto *MemBehaviorAA =
      A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
  if (MemBehaviorAA &&
      (MemBehaviorAA->isAssumedReadNone() ||
       (!RequireReadNone && MemBehaviorAA->isAssumedReadOnly()))) {
    IsKnown = RequireReadNone ? MemBehaviorAA->isKnownReadNone()
                              : MemBehaviorAA->isKnownReadOnly();
    if (!IsKnown)
      A.recordDependence(*MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
    return true;
  }

  return false;
}

bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ false, IsKnown);
}
bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ true, IsKnown);
}

static bool
isPotentiallyReachable(Attributor &A, const Instruction &FromI,
                       const Instruction *ToI, const Function &ToFn,
                       const AbstractAttribute &QueryingAA,
                       const AA::InstExclusionSetTy *ExclusionSet,
                       std::function<bool(const Function &F)> GoBackwardsCB) {
  DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
    dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
           << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
           << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
           << "]\n";
    if (ExclusionSet)
      for (auto *ES : *ExclusionSet)
        dbgs() << *ES << "\n";
  });

  // We know kernels (generally) cannot be called from within the module. Thus,
  // for reachability we would need to step back from a kernel which would
  // allow us to reach anything anyway. Even if a kernel is invoked from
  // another kernel, values like allocas and shared memory are not accessible.
  // We implicitly check for this situation to avoid costly lookups.
  if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
      !GoBackwardsCB(*FromI.getFunction()) && A.getInfoCache().isKernel(ToFn) &&
      A.getInfoCache().isKernel(*FromI.getFunction())) {
    LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
                         "module; success\n";);
    return false;
  }

  // If we can go arbitrarily backwards we will eventually reach an entry point
  // that can reach ToI. Only if a set of blocks through which we cannot go is
  // provided, or once we track internal functions not accessible from the
  // outside, it makes sense to perform backwards analysis in the absence of a
  // GoBackwardsCB.
  if (!GoBackwardsCB && !ExclusionSet) {
    LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                      << " is not checked backwards and does not have an "
                         "exclusion set, abort\n");
    return true;
  }

  SmallPtrSet<const Instruction *, 8> Visited;
  SmallVector<const Instruction *> Worklist;
  Worklist.push_back(&FromI);

  while (!Worklist.empty()) {
    const Instruction *CurFromI = Worklist.pop_back_val();
    if (!Visited.insert(CurFromI).second)
      continue;

    const Function *FromFn = CurFromI->getFunction();
    if (FromFn == &ToFn) {
      if (!ToI)
        return true;
      LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
                        << " intraprocedurally\n");
      const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
                                           A, *CurFromI, *ToI, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << *ToI << " [Intra]\n");
      if (Result)
        return true;
    }

    bool Result = true;
    if (!ToFn.isDeclaration() && ToI) {
      const auto *ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      const Instruction &EntryI = ToFn.getEntryBlock().front();
      Result = !ToReachabilityAA || ToReachabilityAA->isAssumedReachable(
                                        A, EntryI, *ToI, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << *ToI << " [ToFn]\n");
    }

    if (Result) {
      // The entry of the ToFn can reach the instruction ToI. Check if the
      // current instruction is already known to reach the ToFn.
      const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
          QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
      Result = !FnReachabilityAA || FnReachabilityAA->instructionCanReach(
                                        A, *CurFromI, ToFn, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << ToFn.getName() << " [FromFn]\n");
      if (Result)
        return true;
    }

    // TODO: Check assumed nounwind.
    const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
        QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
    auto ReturnInstCB = [&](Instruction &Ret) {
      bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
                                           A, *CurFromI, Ret, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << Ret << " [Intra]\n");
      return !Result;
    };

    // Check if we can reach returns.
    bool UsedAssumedInformation = false;
    if (A.checkForAllInstructions(ReturnInstCB, FromFn, &QueryingAA,
                                  {Instruction::Ret}, UsedAssumedInformation)) {
      LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
      continue;
    }

    if (!GoBackwardsCB) {
      LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                        << " is not checked backwards, abort\n");
      return true;
    }

    // If we do not go backwards from the FromFn we are done here and so far we
    // could not find a way to reach ToFn/ToI.
    if (!GoBackwardsCB(*FromFn))
      continue;

    LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
                      << FromFn->getName() << "\n");

    auto CheckCallSite = [&](AbstractCallSite ACS) {
      CallBase *CB = ACS.getInstruction();
      if (!CB)
        return false;

      if (isa<InvokeInst>(CB))
        return false;

      Instruction *Inst = CB->getNextNonDebugInstruction();
      Worklist.push_back(Inst);
      return true;
    };

    Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
                                     /* RequireAllCallSites */ true,
                                     &QueryingAA, UsedAssumedInformation);
    if (Result) {
      LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
                        << " in @" << FromFn->getName()
                        << " failed, give up\n");
      return true;
    }

    LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
                      << " in @" << FromFn->getName()
                      << " worklist size is: " << Worklist.size() << "\n");
  }
  return false;
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Instruction &ToI,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  const Function *ToFn = ToI.getFunction();
  return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Function &ToFn,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}

bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
                                    const AbstractAttribute &QueryingAA) {
  if (isa<UndefValue>(Obj))
    return true;
  if (isa<AllocaInst>(Obj)) {
    InformationCache &InfoCache = A.getInfoCache();
    if (!InfoCache.stackIsAccessibleByOtherThreads()) {
      LLVM_DEBUG(
          dbgs() << "[AA] Object '" << Obj
                 << "' is thread local; stack objects are thread local.\n");
      return true;
    }
    bool IsKnownNoCapture;
    bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::Captures>(
        A, &QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL,
        IsKnownNoCapture);
    LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
                      << (IsAssumedNoCapture ? "" : "not") << " thread local; "
                      << (IsAssumedNoCapture ? "non-" : "")
                      << "captured stack object.\n");
    return IsAssumedNoCapture;
  }
  if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
    if (GV->isConstant()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; constant global\n");
      return true;
    }
    if (GV->isThreadLocal()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; thread local global\n");
      return true;
    }
  }

  if (A.getInfoCache().targetIsGPU()) {
    if (Obj.getType()->getPointerAddressSpace() ==
        (int)AA::GPUAddressSpace::Local) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU local memory\n");
      return true;
    }
    if (Obj.getType()->getPointerAddressSpace() ==
        (int)AA::GPUAddressSpace::Constant) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU constant memory\n");
      return true;
    }
  }

  LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
  return false;
}

bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
                                        const AbstractAttribute &QueryingAA) {
  if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
    return false;

  SmallSetVector<const Value *, 8> Ptrs;

  auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
    if (!Loc || !Loc->Ptr) {
      LLVM_DEBUG(
          dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
      return false;
    }
    Ptrs.insert(Loc->Ptr);
    return true;
  };

  if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
    if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
      return true;
    if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
      if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
        return true;
  } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
    return true;

  return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
}

bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
                                        ArrayRef<const Value *> Ptrs,
                                        const AbstractAttribute &QueryingAA,
                                        const Instruction *CtxI) {
  for (const Value *Ptr : Ptrs) {
    if (!Ptr) {
      LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
      return true;
    }

    auto Pred = [&](Value &Obj) {
      if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
        return true;
      LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
                        << "'; -> requires barrier\n");
      return false;
    };

    const auto *UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
        QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
    if (!UnderlyingObjsAA || !UnderlyingObjsAA->forallUnderlyingObjects(Pred))
      return true;
  }
  return false;
}

/// Return true if \p New is equal or worse than \p Old.
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}
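// Example (illustrative): for integer attributes a larger value is stronger,
// so a new dereferenceable(8) is "equal or worse" than an existing
// dereferenceable(16) and addIfNotExistent below will keep the latter.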

/// Return true if the information provided by \p Attr was added to the
/// attribute set \p AttrSet. This is only the case if it was not already
/// present in \p AttrSet.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeSet AttrSet, bool ForceReplace,
                             AttrBuilder &AB) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (AttrSet.hasAttribute(Kind))
      return false;
    AB.addAttribute(Kind);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (AttrSet.hasAttribute(Kind)) {
      if (!ForceReplace)
        return false;
    }
    AB.addAttribute(Kind, Attr.getValueAsString());
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (!ForceReplace && Kind == Attribute::Memory) {
      MemoryEffects ME = Attr.getMemoryEffects() & AttrSet.getMemoryEffects();
      if (ME == AttrSet.getMemoryEffects())
        return false;
      AB.addMemoryAttr(ME);
      return true;
    }
    if (AttrSet.hasAttribute(Kind)) {
      if (!ForceReplace && isEqualOrWorse(Attr, AttrSet.getAttribute(Kind)))
        return false;
    }
    AB.addAttribute(Attr);
    return true;
  }
  if (Attr.isConstantRangeAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (!ForceReplace && AttrSet.hasAttribute(Kind))
      return false;
    AB.addAttribute(Attr);
    return true;
  }

  llvm_unreachable("Expected enum, string, int, or constant range attribute!");
}

Argument *IRPosition::getAssociatedArgument() const {
  if (getPositionKind() == IRP_ARGUMENT)
    return cast<Argument>(&getAnchorValue());

  // Not an Argument and no argument number means this is not a call site
  // argument, thus we cannot find a callback argument to return.
  int ArgNo = getCallSiteArgNo();
  if (ArgNo < 0)
    return nullptr;

  // Use abstract call sites to make the connection between the call site
  // values and the ones in callbacks. If a callback was found that makes use
  // of the underlying call site operand, we want the corresponding callback
  // callee argument and not the direct callee argument.
  std::optional<Argument *> CBCandidateArg;
  SmallVector<const Use *, 4> CallbackUses;
  const auto &CB = cast<CallBase>(getAnchorValue());
  AbstractCallSite::getCallbackUses(CB, CallbackUses);
  for (const Use *U : CallbackUses) {
    AbstractCallSite ACS(U);
    assert(ACS && ACS.isCallbackCall());
    if (!ACS.getCalledFunction())
      continue;

    for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {

      // Test if the underlying call site operand is argument number u of the
      // callback callee.
      if (ACS.getCallArgOperandNo(u) != ArgNo)
        continue;

      assert(ACS.getCalledFunction()->arg_size() > u &&
             "ACS mapped into var-args arguments!");
      if (CBCandidateArg) {
        CBCandidateArg = nullptr;
        break;
      }
      CBCandidateArg = ACS.getCalledFunction()->getArg(u);
    }
  }

  // If we found a unique callback candidate argument, return it.
  if (CBCandidateArg && *CBCandidateArg)
    return *CBCandidateArg;

  // If no callbacks were found, or none used the underlying call site operand
  // exclusively, use the direct callee argument if available.
  auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
  if (Callee && Callee->arg_size() > unsigned(ArgNo))
    return Callee->getArg(ArgNo);

  return nullptr;
}
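// Example (illustrative): for a broker call like pthread_create whose
// !callback metadata marks the start-routine argument, a call site operand
// passed through to the callback maps to the callback callee's parameter
// rather than to a parameter of the broker itself; the loop above returns
// that callback argument when it is unique.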

ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}

Attributor::Attributor(SetVector<Function *> &Functions,
                       InformationCache &InfoCache,
                       AttributorConfig Configuration)
    : Allocator(InfoCache.Allocator), Functions(Functions),
      InfoCache(InfoCache), Configuration(Configuration) {
  if (!isClosedWorldModule())
    return;
  for (Function *Fn : Functions)
    if (Fn->hasAddressTaken(/*PutOffender=*/nullptr,
                            /*IgnoreCallbackUses=*/false,
                            /*IgnoreAssumeLikeCalls=*/true,
                            /*IgnoreLLVMUsed=*/true,
                            /*IgnoreARCAttachedCall=*/false,
                            /*IgnoreCastedDirectCall=*/true))
      InfoCache.IndirectlyCallableFunctions.push_back(Fn);
}

bool Attributor::getAttrsFromAssumes(const IRPosition &IRP,
                                     Attribute::AttrKind AK,
                                     SmallVectorImpl<Attribute> &Attrs) {
  assert(IRP.getPositionKind() != IRPosition::IRP_INVALID &&
         "Did expect a valid position!");
  MustBeExecutedContextExplorer *Explorer =
      getInfoCache().getMustBeExecutedContextExplorer();
  if (!Explorer)
    return false;

  Value &AssociatedValue = IRP.getAssociatedValue();

  const Assume2KnowledgeMap &A2K =
      getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});

  // Check if we found any potential assume use; if not, we don't need to
  // create explorer iterators.
  if (A2K.empty())
    return false;

  LLVMContext &Ctx = AssociatedValue.getContext();
  unsigned AttrsSize = Attrs.size();
  auto EIt = Explorer->begin(IRP.getCtxI()),
       EEnd = Explorer->end(IRP.getCtxI());
  for (const auto &It : A2K)
    if (Explorer->findInContextOf(It.first, EIt, EEnd))
      Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
  return AttrsSize != Attrs.size();
}

template <typename DescTy>
ChangeStatus
Attributor::updateAttrMap(const IRPosition &IRP, ArrayRef<DescTy> AttrDescs,
                          function_ref<bool(const DescTy &, AttributeSet,
                                            AttributeMask &, AttrBuilder &)>
                              CB) {
  if (AttrDescs.empty())
    return ChangeStatus::UNCHANGED;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_INVALID:
    return ChangeStatus::UNCHANGED;
  default:
    break;
  };

  AttributeList AL;
  Value *AttrListAnchor = IRP.getAttrListAnchor();
  auto It = AttrsMap.find(AttrListAnchor);
  if (It == AttrsMap.end())
    AL = IRP.getAttrList();
  else
    AL = It->getSecond();

  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  auto AttrIdx = IRP.getAttrIdx();
  AttributeSet AS = AL.getAttributes(AttrIdx);
  AttributeMask AM;
  AttrBuilder AB(Ctx);

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  for (const DescTy &AttrDesc : AttrDescs)
    if (CB(AttrDesc, AS, AM, AB))
      HasChanged = ChangeStatus::CHANGED;

  if (HasChanged == ChangeStatus::UNCHANGED)
    return ChangeStatus::UNCHANGED;

  AL = AL.removeAttributesAtIndex(Ctx, AttrIdx, AM);
  AL = AL.addAttributesAtIndex(Ctx, AttrIdx, AB);
  AttrsMap[AttrListAnchor] = AL;
  return ChangeStatus::CHANGED;
}

bool Attributor::hasAttr(const IRPosition &IRP,
                         ArrayRef<Attribute::AttrKind> AttrKinds,
                         bool IgnoreSubsumingPositions,
                         Attribute::AttrKind ImpliedAttributeKind) {
  bool Implied = false;
  bool HasAttr = false;
  auto HasAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
                       AttributeMask &, AttrBuilder &) {
    if (AttrSet.hasAttribute(Kind)) {
      Implied |= Kind != ImpliedAttributeKind;
      HasAttr = true;
    }
    return false;
  };
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
    updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, HasAttrCB);
    if (HasAttr)
      break;
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
    Implied = true;
  }
  if (!HasAttr) {
    Implied = true;
    SmallVector<Attribute> Attrs;
    for (Attribute::AttrKind AK : AttrKinds)
      if (getAttrsFromAssumes(IRP, AK, Attrs)) {
        HasAttr = true;
        break;
      }
  }

  // Check if we should manifest the implied attribute kind at the IRP.
  if (ImpliedAttributeKind != Attribute::None && HasAttr && Implied)
    manifestAttrs(IRP, {Attribute::get(IRP.getAnchorValue().getContext(),
                                       ImpliedAttributeKind)});
  return HasAttr;
}

void Attributor::getAttrs(const IRPosition &IRP,
                          ArrayRef<Attribute::AttrKind> AttrKinds,
                          SmallVectorImpl<Attribute> &Attrs,
                          bool IgnoreSubsumingPositions) {
  auto CollectAttrCB = [&](const Attribute::AttrKind &Kind,
                           AttributeSet AttrSet, AttributeMask &,
                           AttrBuilder &) {
    if (AttrSet.hasAttribute(Kind))
      Attrs.push_back(AttrSet.getAttribute(Kind));
    return false;
  };
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
    updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, CollectAttrCB);
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  for (Attribute::AttrKind AK : AttrKinds)
    getAttrsFromAssumes(IRP, AK, Attrs);
}

ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
                                     ArrayRef<Attribute::AttrKind> AttrKinds) {
  auto RemoveAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
                          AttributeMask &AM, AttrBuilder &) {
    if (!AttrSet.hasAttribute(Kind))
      return false;
    AM.addAttribute(Kind);
    return true;
  };
  return updateAttrMap<Attribute::AttrKind>(IRP, AttrKinds, RemoveAttrCB);
}

ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
                                     ArrayRef<StringRef> Attrs) {
  auto RemoveAttrCB = [&](StringRef Attr, AttributeSet AttrSet,
                          AttributeMask &AM, AttrBuilder &) -> bool {
    if (!AttrSet.hasAttribute(Attr))
      return false;
    AM.addAttribute(Attr);
    return true;
  };

  return updateAttrMap<StringRef>(IRP, Attrs, RemoveAttrCB);
}

ChangeStatus Attributor::manifestAttrs(const IRPosition &IRP,
                                       ArrayRef<Attribute> Attrs,
                                       bool ForceReplace) {
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  auto AddAttrCB = [&](const Attribute &Attr, AttributeSet AttrSet,
                       AttributeMask &, AttrBuilder &AB) {
    return addIfNotExistent(Ctx, Attr, AttrSet, ForceReplace, AB);
  };
  return updateAttrMap<Attribute>(IRP, Attrs, AddAttrCB);
}

const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
const IRPosition
    IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());

SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  // Helper to determine if operand bundles on a call site are benign or
  // potentially problematic. We handle only llvm.assume for now.
  auto CanIgnoreOperandBundles = [](const CallBase &CB) {
    return (isa<IntrinsicInst>(CB) &&
            cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic::assume);
  };

  const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
      if (auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand()))
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      if (auto *Callee =
              dyn_cast_if_present<Function>(CB->getCalledOperand())) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
        for (const Argument &Arg : Callee->args())
          if (Arg.hasReturnedAttr()) {
            IRPositions.emplace_back(
                IRPosition::callsite_argument(*CB, Arg.getArgNo()));
            IRPositions.emplace_back(
                IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
            IRPositions.emplace_back(IRPosition::argument(Arg));
          }
      }
    }
    IRPositions.emplace_back(IRPosition::callsite_function(*CB));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
      if (Callee) {
        if (Argument *Arg = IRP.getAssociatedArgument())
          IRPositions.emplace_back(IRPosition::argument(*Arg));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}

void IRPosition::verify() {
#ifdef EXPENSIVE_CHECKS
  switch (getPositionKind()) {
  case IRP_INVALID:
    assert((CBContext == nullptr) &&
           "Invalid position must not have CallBaseContext!");
    assert(!Enc.getOpaqueValue() &&
           "Expected a nullptr for an invalid position!");
    return;
  case IRP_FLOAT:
    assert((!isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for argument values!");
    return;
  case IRP_RETURNED:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_RETURNED:
    assert((CBContext == nullptr) &&
           "'call site returned' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE:
    assert((CBContext == nullptr) &&
           "'call site function' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_FUNCTION:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_ARGUMENT:
    assert(isa<Argument>(getAsValuePtr()) &&
1379 "Expected argument for a 'argument' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_ARGUMENT: {
    assert((CBContext == nullptr) &&
           "'call site argument' position must not have CallBaseContext!");
    Use *U = getAsUsePtr();
    (void)U; // Silence unused variable warning.
    assert(U && "Expected use for a 'call site argument' position!");
    assert(isa<CallBase>(U->getUser()) &&
           "Expected call base user for a 'call site argument' position!");
    assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
           "Expected call base argument operand for a 'call site argument' "
           "position");
    assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
               unsigned(getCallSiteArgNo()) &&
           "Argument number mismatch!");
    assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
    return;
  }
  }
#endif
}

std::optional<Constant *>
Attributor::getAssumedConstant(const IRPosition &IRP,
                               const AbstractAttribute &AA,
                               bool &UsedAssumedInformation) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or
  // std::nullopt, we assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
    std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
    if (!SimplifiedV)
      return std::nullopt;
    if (isa_and_nonnull<Constant>(*SimplifiedV))
      return cast<Constant>(*SimplifiedV);
    return nullptr;
  }
  if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
    return C;
  SmallVector<AA::ValueAndContext> Values;
  if (getAssumedSimplifiedValues(IRP, &AA, Values,
                                 AA::ValueScope::Interprocedural,
                                 UsedAssumedInformation)) {
    if (Values.empty())
      return std::nullopt;
    if (auto *C = dyn_cast_or_null<Constant>(
            AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
      return C;
  }
  return nullptr;
}
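// Result sketch (illustrative): std::nullopt means no value is known (yet),
// nullptr means the value cannot be assumed to be a constant, and a non-null
// result is the assumed constant, e.g., the folded initializer of a constant
// global that is loaded from.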
1433
getAssumedSimplified(const IRPosition & IRP,const AbstractAttribute * AA,bool & UsedAssumedInformation,AA::ValueScope S)1434 std::optional<Value *> Attributor::getAssumedSimplified(
1435 const IRPosition &IRP, const AbstractAttribute *AA,
1436 bool &UsedAssumedInformation, AA::ValueScope S) {
1437 // First check all callbacks provided by outside AAs. If any of them returns
1438 // a non-null value that is different from the associated value, or
1439 // std::nullopt, we assume it's simplified.
1440 for (auto &CB : SimplificationCallbacks.lookup(IRP))
1441 return CB(IRP, AA, UsedAssumedInformation);
1442
1443 SmallVector<AA::ValueAndContext> Values;
1444 if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
1445 return &IRP.getAssociatedValue();
1446 if (Values.empty())
1447 return std::nullopt;
1448 if (AA)
1449 if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
1450 return V;
1451 if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
1452 IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
1453 return nullptr;
1454 return &IRP.getAssociatedValue();
1455 }
1456
1457 bool Attributor::getAssumedSimplifiedValues(
1458 const IRPosition &InitialIRP, const AbstractAttribute *AA,
1459 SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
1460 bool &UsedAssumedInformation, bool RecurseForSelectAndPHI) {
1461 SmallPtrSet<Value *, 8> Seen;
1462 SmallVector<IRPosition, 8> Worklist;
1463 Worklist.push_back(InitialIRP);
1464 while (!Worklist.empty()) {
1465 const IRPosition &IRP = Worklist.pop_back_val();
1466
1467 // First check all callbacks provided by outside AAs. If any of them returns
1468 // a non-null value that is different from the associated value, or
1469 // std::nullopt, we assume it's simplified.
1470 int NV = Values.size();
1471 const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
1472 for (const auto &CB : SimplificationCBs) {
1473 std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
1474 if (!CBResult.has_value())
1475 continue;
1476 Value *V = *CBResult;
1477 if (!V)
1478 return false;
1479 if ((S & AA::ValueScope::Interprocedural) ||
1480 AA::isValidInScope(*V, IRP.getAnchorScope()))
1481 Values.push_back(AA::ValueAndContext{*V, nullptr});
1482 else
1483 return false;
1484 }
1485 if (SimplificationCBs.empty()) {
1486 // If no high-level/outside simplification occurred, use
1487 // AAPotentialValues.
1488 const auto *PotentialValuesAA =
1489 getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
1490 if (PotentialValuesAA &&
1491 PotentialValuesAA->getAssumedSimplifiedValues(*this, Values, S)) {
1492 UsedAssumedInformation |= !PotentialValuesAA->isAtFixpoint();
1493 } else if (IRP.getPositionKind() != IRPosition::IRP_RETURNED) {
1494 Values.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
1495 } else {
1496 // TODO: We could visit all returns and add the operands.
1497 return false;
1498 }
1499 }
1500
1501 if (!RecurseForSelectAndPHI)
1502 break;
1503
1504 for (int I = NV, E = Values.size(); I < E; ++I) {
1505 Value *V = Values[I].getValue();
1506 if (!isa<PHINode>(V) && !isa<SelectInst>(V))
1507 continue;
1508 if (!Seen.insert(V).second)
1509 continue;
1510 // Move the last element to this slot.
1511 Values[I] = Values[E - 1];
1512 // Eliminate the last slot, adjust the indices.
1513 Values.pop_back();
1514 --E;
1515 --I;
1516 // Add a new value (select or phi) to the worklist.
1517 Worklist.push_back(IRPosition::value(*V));
1518 }
1519 }
1520 return true;
1521 }
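
// A worked example of the select/PHI recursion above (illustrative IR):
//
//   %sel = select i1 %c, i32 7, i32 %x
//
// With RecurseForSelectAndPHI set, %sel is first collected as a potential
// value, then removed from Values again and pushed on the worklist, so the
// final result contains the simplified values of both operands, e.g. {7, %x}.
// The Seen set guarantees termination for cyclic PHIs.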
1522
1523 std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
1524 std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1525 bool &UsedAssumedInformation) {
1526 if (!V)
1527 return V;
1528 if (*V == nullptr || isa<Constant>(*V))
1529 return V;
1530 if (auto *Arg = dyn_cast<Argument>(*V))
1531 if (CB.getCalledOperand() == Arg->getParent() &&
1532 CB.arg_size() > Arg->getArgNo())
1533 if (!Arg->hasPointeeInMemoryValueAttr())
1534 return getAssumedSimplified(
1535 IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
1536 UsedAssumedInformation, AA::Intraprocedural);
1537 return nullptr;
1538 }
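
// Example (illustrative IR): for
//
//   define internal i32 @callee(i32 %a) { ... }
//   %r = call i32 @callee(i32 42)
//
// translating %a to the call site content queries the assumed simplified
// value of the call site argument, here the constant 42. Arguments with a
// pointee-in-memory attribute (e.g. byval) are skipped because the operand at
// the call site is not the value the callee observes.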
1539
1540 Attributor::~Attributor() {
1541 // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
1542 // thus we cannot delete them. We can, and want to, destruct them though.
1543 for (auto &It : AAMap) {
1544 AbstractAttribute *AA = It.getSecond();
1545 AA->~AbstractAttribute();
1546 }
1547 }
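
// The matching allocation side looks roughly like this (a sketch; the actual
// creation happens in getOrCreateAAFor and friends):
//
//   void *Mem = Allocator.Allocate(sizeof(AAType), alignof(AAType));
//   auto *AA = new (Mem) AAType(IRP, A); // Placement new, allocator-owned.
//   ...
//   AA->~AbstractAttribute();            // Destruct manually; the memory
//                                        // is released in bulk later.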
1548
1549 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
1550 const AAIsDead *FnLivenessAA,
1551 bool &UsedAssumedInformation,
1552 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1553 if (!Configuration.UseLiveness)
1554 return false;
1555 const IRPosition &IRP = AA.getIRPosition();
1556 if (!Functions.count(IRP.getAnchorScope()))
1557 return false;
1558 return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
1559 CheckBBLivenessOnly, DepClass);
1560 }
1561
1562 bool Attributor::isAssumedDead(const Use &U,
1563 const AbstractAttribute *QueryingAA,
1564 const AAIsDead *FnLivenessAA,
1565 bool &UsedAssumedInformation,
1566 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1567 if (!Configuration.UseLiveness)
1568 return false;
1569 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
1570 if (!UserI)
1571 return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
1572 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1573
1574 if (auto *CB = dyn_cast<CallBase>(UserI)) {
1575 // For call site argument uses we can check if the argument is
1576 // unused/dead.
1577 if (CB->isArgOperand(&U)) {
1578 const IRPosition &CSArgPos =
1579 IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
1580 return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
1581 UsedAssumedInformation, CheckBBLivenessOnly,
1582 DepClass);
1583 }
1584 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
1585 const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
1586 return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
1587 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1588 } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
1589 BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1590 return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
1591 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1592 } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
1593 if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1594 const IRPosition IRP = IRPosition::inst(*SI);
1595 const AAIsDead *IsDeadAA =
1596 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1597 if (IsDeadAA && IsDeadAA->isRemovableStore()) {
1598 if (QueryingAA)
1599 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1600 if (!IsDeadAA->isKnown(AAIsDead::IS_REMOVABLE))
1601 UsedAssumedInformation = true;
1602 return true;
1603 }
1604 }
1605 }
1606
1607 return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
1608 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1609 }
1610
1611 bool Attributor::isAssumedDead(const Instruction &I,
1612 const AbstractAttribute *QueryingAA,
1613 const AAIsDead *FnLivenessAA,
1614 bool &UsedAssumedInformation,
1615 bool CheckBBLivenessOnly, DepClassTy DepClass,
1616 bool CheckForDeadStore) {
1617 if (!Configuration.UseLiveness)
1618 return false;
1619 const IRPosition::CallBaseContext *CBCtx =
1620 QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1621
1622 if (ManifestAddedBlocks.contains(I.getParent()))
1623 return false;
1624
1625 const Function &F = *I.getFunction();
1626 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1627 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1628 QueryingAA, DepClassTy::NONE);
1629
1630 // Don't use recursive reasoning.
1631 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1632 return false;
1633
1634 // If we have a context instruction and a liveness AA we use it.
1635 if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1636 : FnLivenessAA->isAssumedDead(&I)) {
1637 if (QueryingAA)
1638 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1639 if (!FnLivenessAA->isKnownDead(&I))
1640 UsedAssumedInformation = true;
1641 return true;
1642 }
1643
1644 if (CheckBBLivenessOnly)
1645 return false;
1646
1647 const IRPosition IRP = IRPosition::inst(I, CBCtx);
1648 const AAIsDead *IsDeadAA =
1649 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1650
1651 // Don't use recursive reasoning.
1652 if (!IsDeadAA || QueryingAA == IsDeadAA)
1653 return false;
1654
1655 if (IsDeadAA->isAssumedDead()) {
1656 if (QueryingAA)
1657 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1658 if (!IsDeadAA->isKnownDead())
1659 UsedAssumedInformation = true;
1660 return true;
1661 }
1662
1663 if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA->isRemovableStore()) {
1664 if (QueryingAA)
1665 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1666 if (!IsDeadAA->isKnownDead())
1667 UsedAssumedInformation = true;
1668 return true;
1669 }
1670
1671 return false;
1672 }
1673
1674 bool Attributor::isAssumedDead(const IRPosition &IRP,
1675 const AbstractAttribute *QueryingAA,
1676 const AAIsDead *FnLivenessAA,
1677 bool &UsedAssumedInformation,
1678 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1679 if (!Configuration.UseLiveness)
1680 return false;
1681 // Don't check liveness for constants, e.g. functions, used as (floating)
1682 // values since the context instruction and such are meaningless here.
1683 if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
1684 isa<Constant>(IRP.getAssociatedValue())) {
1685 return false;
1686 }
1687
1688 Instruction *CtxI = IRP.getCtxI();
1689 if (CtxI &&
1690 isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1691 /* CheckBBLivenessOnly */ true,
1692 CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1693 return true;
1694
1695 if (CheckBBLivenessOnly)
1696 return false;
1697
1698 // If we haven't succeeded we query the specific liveness info for the IRP.
1699 const AAIsDead *IsDeadAA;
1700 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
1701 IsDeadAA = getOrCreateAAFor<AAIsDead>(
1702 IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
1703 QueryingAA, DepClassTy::NONE);
1704 else
1705 IsDeadAA = getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1706
1707 // Don't use recursive reasoning.
1708 if (!IsDeadAA || QueryingAA == IsDeadAA)
1709 return false;
1710
1711 if (IsDeadAA->isAssumedDead()) {
1712 if (QueryingAA)
1713 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1714 if (!IsDeadAA->isKnownDead())
1715 UsedAssumedInformation = true;
1716 return true;
1717 }
1718
1719 return false;
1720 }
1721
1722 bool Attributor::isAssumedDead(const BasicBlock &BB,
1723 const AbstractAttribute *QueryingAA,
1724 const AAIsDead *FnLivenessAA,
1725 DepClassTy DepClass) {
1726 if (!Configuration.UseLiveness)
1727 return false;
1728 const Function &F = *BB.getParent();
1729 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1730 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
1731 QueryingAA, DepClassTy::NONE);
1732
1733 // Don't use recursive reasoning.
1734 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1735 return false;
1736
1737 if (FnLivenessAA->isAssumedDead(&BB)) {
1738 if (QueryingAA)
1739 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1740 return true;
1741 }
1742
1743 return false;
1744 }
1745
1746 bool Attributor::checkForAllCallees(
1747 function_ref<bool(ArrayRef<const Function *>)> Pred,
1748 const AbstractAttribute &QueryingAA, const CallBase &CB) {
1749 if (const Function *Callee = dyn_cast<Function>(CB.getCalledOperand()))
1750 return Pred(Callee);
1751
1752 const auto *CallEdgesAA = getAAFor<AACallEdges>(
1753 QueryingAA, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
1754 if (!CallEdgesAA || CallEdgesAA->hasUnknownCallee())
1755 return false;
1756
1757 const auto &Callees = CallEdgesAA->getOptimisticEdges();
1758 return Pred(Callees.getArrayRef());
1759 }
1760
1761 static bool canMarkAsVisited(const User *Usr) {
1762 return isa<PHINode>(Usr) || !isa<Instruction>(Usr);
1763 }
1764
1765 bool Attributor::checkForAllUses(
1766 function_ref<bool(const Use &, bool &)> Pred,
1767 const AbstractAttribute &QueryingAA, const Value &V,
1768 bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1769 bool IgnoreDroppableUses,
1770 function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1771
1772 // Check virtual uses first.
1773 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1774 if (!CB(*this, &QueryingAA))
1775 return false;
1776
1777 if (isa<ConstantData>(V))
1778 return false;
1779
1780 // Check the trivial case first as it catches void values.
1781 if (V.use_empty())
1782 return true;
1783
1784 const IRPosition &IRP = QueryingAA.getIRPosition();
1785 SmallVector<const Use *, 16> Worklist;
1786 SmallPtrSet<const Use *, 16> Visited;
1787
1788 auto AddUsers = [&](const Value &V, const Use *OldUse) {
1789 for (const Use &UU : V.uses()) {
1790 if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1791 LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1792 "rejected by the equivalence call back: "
1793 << *UU << "!\n");
1794 return false;
1795 }
1796
1797 Worklist.push_back(&UU);
1798 }
1799 return true;
1800 };
1801
1802 AddUsers(V, /* OldUse */ nullptr);
1803
1804 LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1805 << " initial uses to check\n");
1806
1807 const Function *ScopeFn = IRP.getAnchorScope();
1808 const auto *LivenessAA =
1809 ScopeFn ? getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
1810 DepClassTy::NONE)
1811 : nullptr;
1812
1813 while (!Worklist.empty()) {
1814 const Use *U = Worklist.pop_back_val();
1815 if (canMarkAsVisited(U->getUser()) && !Visited.insert(U).second)
1816 continue;
1817 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1818 if (auto *Fn = dyn_cast<Function>(U->getUser()))
1819 dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1820 << "\n";
1821 else
1822 dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1823 << "\n";
1824 });
1825 bool UsedAssumedInformation = false;
1826 if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1827 CheckBBLivenessOnly, LivenessDepClass)) {
1828 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1829 dbgs() << "[Attributor] Dead use, skip!\n");
1830 continue;
1831 }
1832 if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1833 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1834 dbgs() << "[Attributor] Droppable user, skip!\n");
1835 continue;
1836 }
1837
1838 if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1839 if (&SI->getOperandUse(0) == U) {
1840 if (!Visited.insert(U).second)
1841 continue;
1842 SmallSetVector<Value *, 4> PotentialCopies;
1843 if (AA::getPotentialCopiesOfStoredValue(
1844 *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1845 /* OnlyExact */ true)) {
1846 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1847 dbgs()
1848 << "[Attributor] Value is stored, continue with "
1849 << PotentialCopies.size()
1850 << " potential copies instead!\n");
1851 for (Value *PotentialCopy : PotentialCopies)
1852 if (!AddUsers(*PotentialCopy, U))
1853 return false;
1854 continue;
1855 }
1856 }
1857 }
1858
1859 bool Follow = false;
1860 if (!Pred(*U, Follow))
1861 return false;
1862 if (!Follow)
1863 continue;
1864
1865 User &Usr = *U->getUser();
1866 AddUsers(Usr, /* OldUse */ nullptr);
1867 }
1868
1869 return true;
1870 }
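
// Usage sketch (hypothetical predicate; `A`, `QueryingAA`, and `V` are
// assumed to be in scope): accept loads of V, follow GEP users transitively,
// and fail on anything else.
//
//   auto UsePred = [&](const Use &U, bool &Follow) {
//     User *Usr = U.getUser();
//     if (isa<LoadInst>(Usr))
//       return true;                        // A use we can handle.
//     Follow = isa<GetElementPtrInst>(Usr); // Chase transitive GEP users.
//     return Follow;                        // Unknown users fail the check.
//   };
//   bool AllUsesOk = A.checkForAllUses(UsePred, QueryingAA, V);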
1871
1872 bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1873 const AbstractAttribute &QueryingAA,
1874 bool RequireAllCallSites,
1875 bool &UsedAssumedInformation) {
1876 // We can try to determine information from the call sites. However, this
1877 // is only possible if all call sites are known, hence the function must
1878 // have internal linkage.
1879 const IRPosition &IRP = QueryingAA.getIRPosition();
1880 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1881 if (!AssociatedFunction) {
1882 LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1883 << "\n");
1884 return false;
1885 }
1886
1887 return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1888 &QueryingAA, UsedAssumedInformation);
1889 }
1890
1891 bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1892 const Function &Fn,
1893 bool RequireAllCallSites,
1894 const AbstractAttribute *QueryingAA,
1895 bool &UsedAssumedInformation,
1896 bool CheckPotentiallyDead) {
1897 if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1898 LLVM_DEBUG(
1899 dbgs()
1900 << "[Attributor] Function " << Fn.getName()
1901 << " has no internal linkage, hence not all call sites are known\n");
1902 return false;
1903 }
1904 // Check virtual uses first.
1905 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1906 if (!CB(*this, QueryingAA))
1907 return false;
1908
1909 SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
1910 for (unsigned u = 0; u < Uses.size(); ++u) {
1911 const Use &U = *Uses[u];
1912 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1913 if (auto *Fn = dyn_cast<Function>(U))
1914 dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1915 << *U.getUser() << "\n";
1916 else
1917 dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1918 << "\n";
1919 });
1920 if (!CheckPotentiallyDead &&
1921 isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1922 /* CheckBBLivenessOnly */ true)) {
1923 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1924 dbgs() << "[Attributor] Dead use, skip!\n");
1925 continue;
1926 }
1927 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
1928 if (CE->isCast() && CE->getType()->isPointerTy()) {
1929 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1930 dbgs() << "[Attributor] Use, is constant cast expression, add "
1931 << CE->getNumUses() << " uses of that expression instead!\n";
1932 });
1933 for (const Use &CEU : CE->uses())
1934 Uses.push_back(&CEU);
1935 continue;
1936 }
1937 }
1938
1939 AbstractCallSite ACS(&U);
1940 if (!ACS) {
1941 LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
1942 << " has non call site use " << *U.get() << " in "
1943 << *U.getUser() << "\n");
1944 return false;
1945 }
1946
1947 const Use *EffectiveUse =
1948 ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
1949 if (!ACS.isCallee(EffectiveUse)) {
1950 if (!RequireAllCallSites) {
1951 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1952 << " is not a call of " << Fn.getName()
1953 << ", skip use\n");
1954 continue;
1955 }
1956 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1957 << " is an invalid use of " << Fn.getName() << "\n");
1958 return false;
1959 }
1960
1961 // Make sure the arguments that can be matched between the call site and the
1962 // callee agree on their type. It is unlikely they do not, and it doesn't
1963 // make sense for all attributes to know/care about this.
1964 assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
1965 unsigned MinArgsParams =
1966 std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
1967 for (unsigned u = 0; u < MinArgsParams; ++u) {
1968 Value *CSArgOp = ACS.getCallArgOperand(u);
1969 if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
1970 LLVM_DEBUG(
1971 dbgs() << "[Attributor] Call site / callee argument type mismatch ["
1972 << u << "@" << Fn.getName() << ": "
1973 << *Fn.getArg(u)->getType() << " vs. "
1974 << *ACS.getCallArgOperand(u)->getType() << "\n");
1975 return false;
1976 }
1977 }
1978
1979 if (Pred(ACS))
1980 continue;
1981
1982 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
1983 << *ACS.getInstruction() << "\n");
1984 return false;
1985 }
1986
1987 return true;
1988 }
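
// Usage sketch (hypothetical; `A` and `Fn` are assumed to be in scope):
// require that every known call site passes a non-null first argument.
//
//   auto CallSitePred = [](AbstractCallSite ACS) {
//     Value *Op = ACS.getCallArgOperand(0);
//     return Op && !isa<ConstantPointerNull>(Op);
//   };
//   bool UsedAssumedInformation = false;
//   bool AllOk = A.checkForAllCallSites(CallSitePred, Fn,
//                                       /* RequireAllCallSites */ true,
//                                       /* QueryingAA */ nullptr,
//                                       UsedAssumedInformation);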
1989
1990 bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
1991 // TODO: Maintain a cache of Values that are on the pathway from an
1992 // Argument to an Instruction that would affect the liveness/return
1993 // state etc.
1994 return EnableCallSiteSpecific;
1995 }
1996
1997 bool Attributor::checkForAllReturnedValues(function_ref<bool(Value &)> Pred,
1998 const AbstractAttribute &QueryingAA,
1999 AA::ValueScope S,
2000 bool RecurseForSelectAndPHI) {
2001
2002 const IRPosition &IRP = QueryingAA.getIRPosition();
2003 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2004 if (!AssociatedFunction)
2005 return false;
2006
2007 bool UsedAssumedInformation = false;
2008 SmallVector<AA::ValueAndContext> Values;
2009 if (!getAssumedSimplifiedValues(
2010 IRPosition::returned(*AssociatedFunction), &QueryingAA, Values, S,
2011 UsedAssumedInformation, RecurseForSelectAndPHI))
2012 return false;
2013
2014 return llvm::all_of(Values, [&](const AA::ValueAndContext &VAC) {
2015 return Pred(*VAC.getValue());
2016 });
2017 }
2018
2019 static bool checkForAllInstructionsImpl(
2020 Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
2021 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
2022 const AAIsDead *LivenessAA, ArrayRef<unsigned> Opcodes,
2023 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
2024 bool CheckPotentiallyDead = false) {
2025 for (unsigned Opcode : Opcodes) {
2026 // Check if we have instructions with this opcode at all first.
2027 auto *Insts = OpcodeInstMap.lookup(Opcode);
2028 if (!Insts)
2029 continue;
2030
2031 for (Instruction *I : *Insts) {
2032 // Skip dead instructions.
2033 if (A && !CheckPotentiallyDead &&
2034 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
2035 UsedAssumedInformation, CheckBBLivenessOnly)) {
2036 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2037 dbgs() << "[Attributor] Instruction " << *I
2038 << " is potentially dead, skip!\n";);
2039 continue;
2040 }
2041
2042 if (!Pred(*I))
2043 return false;
2044 }
2045 }
2046 return true;
2047 }
2048
2049 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2050 const Function *Fn,
2051 const AbstractAttribute *QueryingAA,
2052 ArrayRef<unsigned> Opcodes,
2053 bool &UsedAssumedInformation,
2054 bool CheckBBLivenessOnly,
2055 bool CheckPotentiallyDead) {
2056 // Since we need to provide instructions, we have to have an exact definition.
2057 if (!Fn || Fn->isDeclaration())
2058 return false;
2059
2060 const IRPosition &QueryIRP = IRPosition::function(*Fn);
2061 const auto *LivenessAA =
2062 CheckPotentiallyDead && QueryingAA
2063 ? (getAAFor<AAIsDead>(*QueryingAA, QueryIRP, DepClassTy::NONE))
2064 : nullptr;
2065
2066 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2067 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, QueryingAA,
2068 LivenessAA, Opcodes, UsedAssumedInformation,
2069 CheckBBLivenessOnly, CheckPotentiallyDead))
2070 return false;
2071
2072 return true;
2073 }
2074
2075 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2076 const AbstractAttribute &QueryingAA,
2077 ArrayRef<unsigned> Opcodes,
2078 bool &UsedAssumedInformation,
2079 bool CheckBBLivenessOnly,
2080 bool CheckPotentiallyDead) {
2081 const IRPosition &IRP = QueryingAA.getIRPosition();
2082 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2083 return checkForAllInstructions(Pred, AssociatedFunction, &QueryingAA, Opcodes,
2084 UsedAssumedInformation, CheckBBLivenessOnly,
2085 CheckPotentiallyDead);
2086 }
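
// Usage sketch (hypothetical; `A` and `QueryingAA` are assumed to be in
// scope): visit all potentially live calls in the scope of QueryingAA and
// fail if one of them is a must-tail call.
//
//   auto InstPred = [](Instruction &I) {
//     return !cast<CallInst>(I).isMustTailCall();
//   };
//   bool UsedAssumedInformation = false;
//   bool NoMustTail = A.checkForAllInstructions(
//       InstPred, QueryingAA, {Instruction::Call}, UsedAssumedInformation);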
2087
2088 bool Attributor::checkForAllReadWriteInstructions(
2089 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
2090 bool &UsedAssumedInformation) {
2091 TimeTraceScope TS("checkForAllReadWriteInstructions");
2092
2093 const Function *AssociatedFunction =
2094 QueryingAA.getIRPosition().getAssociatedFunction();
2095 if (!AssociatedFunction)
2096 return false;
2097
2098 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
2099 const auto *LivenessAA =
2100 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
2101
2102 for (Instruction *I :
2103 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
2104 // Skip dead instructions.
2105 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, LivenessAA,
2106 UsedAssumedInformation))
2107 continue;
2108
2109 if (!Pred(*I))
2110 return false;
2111 }
2112
2113 return true;
2114 }
2115
2116 void Attributor::runTillFixpoint() {
2117 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
2118 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
2119 << DG.SyntheticRoot.Deps.size()
2120 << " abstract attributes.\n");
2121
2122 // Now that all abstract attributes are collected and initialized we start
2123 // the abstract analysis.
2124
2125 unsigned IterationCounter = 1;
2126 unsigned MaxIterations =
2127 Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
2128
2129 SmallVector<AbstractAttribute *, 32> ChangedAAs;
2130 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
2131 Worklist.insert_range(DG.SyntheticRoot);
2132
2133 do {
2134 // Remember the size to determine new attributes.
2135 size_t NumAAs = DG.SyntheticRoot.Deps.size();
2136 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
2137 << ", Worklist size: " << Worklist.size() << "\n");
2138
2139 // For invalid AAs we can fix dependent AAs that have a required dependence,
2140 // thereby folding long dependence chains in a single step without the need
2141 // to run updates.
2142 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
2143 AbstractAttribute *InvalidAA = InvalidAAs[u];
2144
2145 // Check the dependences to fast-track invalidation.
2146 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2147 dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
2148 << " has " << InvalidAA->Deps.size()
2149 << " required & optional dependences\n");
2150 for (auto &DepIt : InvalidAA->Deps) {
2151 AbstractAttribute *DepAA = cast<AbstractAttribute>(DepIt.getPointer());
2152 if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
2153 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2154 dbgs() << " - recompute: " << *DepAA);
2155 Worklist.insert(DepAA);
2156 continue;
2157 }
2158 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, dbgs()
2159 << " - invalidate: " << *DepAA);
2160 DepAA->getState().indicatePessimisticFixpoint();
2161 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
2162 if (!DepAA->getState().isValidState())
2163 InvalidAAs.insert(DepAA);
2164 else
2165 ChangedAAs.push_back(DepAA);
2166 }
2167 InvalidAA->Deps.clear();
2168 }
2169
2170 // Add all abstract attributes that are potentially dependent on one that
2171 // changed to the work list.
2172 for (AbstractAttribute *ChangedAA : ChangedAAs) {
2173 for (auto &DepIt : ChangedAA->Deps)
2174 Worklist.insert(cast<AbstractAttribute>(DepIt.getPointer()));
2175 ChangedAA->Deps.clear();
2176 }
2177
2178 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2179 << ", Worklist+Dependent size: " << Worklist.size()
2180 << "\n");
2181
2182 // Reset the changed and invalid set.
2183 ChangedAAs.clear();
2184 InvalidAAs.clear();
2185
2186 // Update all abstract attributes in the work list and record the ones that
2187 // changed.
2188 for (AbstractAttribute *AA : Worklist) {
2189 const auto &AAState = AA->getState();
2190 if (!AAState.isAtFixpoint())
2191 if (updateAA(*AA) == ChangeStatus::CHANGED)
2192 ChangedAAs.push_back(AA);
2193
2194 // Use the InvalidAAs vector to propagate invalid states quickly and
2195 // without requiring updates.
2196 if (!AAState.isValidState())
2197 InvalidAAs.insert(AA);
2198 }
2199
2200 // Add attributes to the changed set if they have been created in the last
2201 // iteration.
2202 ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
2203 DG.SyntheticRoot.end());
2204
2205 // Reset the work list and repopulate with the changed abstract attributes.
2206 // Note that dependent ones are added above.
2207 Worklist.clear();
2208 Worklist.insert_range(ChangedAAs);
2209 Worklist.insert_range(QueryAAsAwaitingUpdate);
2210 QueryAAsAwaitingUpdate.clear();
2211
2212 } while (!Worklist.empty() && (IterationCounter++ < MaxIterations));
2213
2214 if (IterationCounter > MaxIterations && !Functions.empty()) {
2215 auto Remark = [&](OptimizationRemarkMissed ORM) {
2216 return ORM << "Attributor did not reach a fixpoint after "
2217 << ore::NV("Iterations", MaxIterations) << " iterations.";
2218 };
2219 Function *F = Functions.front();
2220 emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
2221 }
2222
2223 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2224 << IterationCounter << "/" << MaxIterations
2225 << " iterations\n");
2226
2227 // Reset abstract attributes not settled in a sound fixpoint by now. This
2228 // happens when we stopped the fixpoint iteration early. Note that only the
2229 // ones marked as "changed" *and* the ones transitively depending on them
2230 // need to be reverted to a pessimistic state. Others might not be in a
2231 // fixpoint state but we can use the optimistic results for them anyway.
2232 SmallPtrSet<AbstractAttribute *, 32> Visited;
2233 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2234 AbstractAttribute *ChangedAA = ChangedAAs[u];
2235 if (!Visited.insert(ChangedAA).second)
2236 continue;
2237
2238 AbstractState &State = ChangedAA->getState();
2239 if (!State.isAtFixpoint()) {
2240 State.indicatePessimisticFixpoint();
2241
2242 NumAttributesTimedOut++;
2243 }
2244
2245 for (auto &DepIt : ChangedAA->Deps)
2246 ChangedAAs.push_back(cast<AbstractAttribute>(DepIt.getPointer()));
2247 ChangedAA->Deps.clear();
2248 }
2249
2250 LLVM_DEBUG({
2251 if (!Visited.empty())
2252 dbgs() << "\n[Attributor] Finalized " << Visited.size()
2253 << " abstract attributes.\n";
2254 });
2255 }
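
// Invalidation fast path illustrated (a sketch, not code from this file):
// assume AA1 depends on AA2 and AA2 depends on AA3, both with REQUIRED
// dependences. If AA3 becomes invalid, the loop above forces AA2 into a
// pessimistic fixpoint and, since that leaves AA2 invalid as well, AA1
// follows in the same iteration, all without calling update() on AA1 or AA2.
// OPTIONAL dependences are merely re-queued for a regular recompute instead.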
2256
2257 void Attributor::registerForUpdate(AbstractAttribute &AA) {
2258 assert(AA.isQueryAA() &&
2259 "Non-query AAs should not be required to register for updates!");
2260 QueryAAsAwaitingUpdate.insert(&AA);
2261 }
2262
2263 ChangeStatus Attributor::manifestAttributes() {
2264 TimeTraceScope TimeScope("Attributor::manifestAttributes");
2265 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2266
2267 unsigned NumManifested = 0;
2268 unsigned NumAtFixpoint = 0;
2269 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2270 for (auto &DepAA : DG.SyntheticRoot.Deps) {
2271 AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
2272 AbstractState &State = AA->getState();
2273
2274 // If a fixpoint was not already reached, we can now take the optimistic
2275 // state. This is correct because we enforced a pessimistic one
2276 // on abstract attributes that were transitively dependent on a changed one
2277 // already above.
2278 if (!State.isAtFixpoint())
2279 State.indicateOptimisticFixpoint();
2280
2281 // We must not manifest attributes that use CallBase info.
2282 if (AA->hasCallBaseContext())
2283 continue;
2284 // If the state is invalid, we do not try to manifest it.
2285 if (!State.isValidState())
2286 continue;
2287
2288 if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
2289 continue;
2290
2291 // Skip dead code.
2292 bool UsedAssumedInformation = false;
2293 if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
2294 /* CheckBBLivenessOnly */ true))
2295 continue;
2296 // Check the manifest debug counter that allows skipping the manifestation
2297 // of AAs.
2298 if (!DebugCounter::shouldExecute(ManifestDBGCounter))
2299 continue;
2300 // Manifest the state and record if we changed the IR.
2301 ChangeStatus LocalChange = AA->manifest(*this);
2302 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2303 AA->trackStatistics();
2304 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2305 << "\n");
2306
2307 ManifestChange = ManifestChange | LocalChange;
2308
2309 NumAtFixpoint++;
2310 NumManifested += (LocalChange == ChangeStatus::CHANGED);
2311 }
2312
2313 (void)NumManifested;
2314 (void)NumAtFixpoint;
2315 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2316 << " arguments while " << NumAtFixpoint
2317 << " were in a valid fixpoint state\n");
2318
2319 NumAttributesManifested += NumManifested;
2320 NumAttributesValidFixpoint += NumAtFixpoint;
2321
2322 (void)NumFinalAAs;
2323 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
2324 auto DepIt = DG.SyntheticRoot.Deps.begin();
2325 for (unsigned u = 0; u < NumFinalAAs; ++u)
2326 ++DepIt;
2327 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
2328 ++u, ++DepIt) {
2329 errs() << "Unexpected abstract attribute: "
2330 << cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
2331 << cast<AbstractAttribute>(DepIt->getPointer())
2332 ->getIRPosition()
2333 .getAssociatedValue()
2334 << "\n";
2335 }
2336 llvm_unreachable("Expected the final number of abstract attributes to "
2337 "remain unchanged!");
2338 }
2339
2340 for (auto &It : AttrsMap) {
2341 AttributeList &AL = It.getSecond();
2342 const IRPosition &IRP =
2343 isa<Function>(It.getFirst())
2344 ? IRPosition::function(*cast<Function>(It.getFirst()))
2345 : IRPosition::callsite_function(*cast<CallBase>(It.getFirst()));
2346 IRP.setAttrList(AL);
2347 }
2348
2349 return ManifestChange;
2350 }
2351
2352 void Attributor::identifyDeadInternalFunctions() {
2353 // Early exit if we don't intend to delete functions.
2354 if (!Configuration.DeleteFns)
2355 return;
2356
2357 // To avoid triggering an assertion in the lazy call graph we will not delete
2358 // any internal library functions. We should modify the assertion though and
2359 // allow internals to be deleted.
2360 const auto *TLI =
2361 isModulePass()
2362 ? nullptr
2363 : getInfoCache().getTargetLibraryInfoForFunction(*Functions.back());
2364 LibFunc LF;
2365
2366 // Identify dead internal functions and delete them. This happens outside
2367 // the other fixpoint analysis as we might treat potentially dead functions
2368 // as live to lower the number of iterations. If they happen to be dead, the
2369 // below fixpoint loop will identify and eliminate them.
2370
2371 SmallVector<Function *, 8> InternalFns;
2372 for (Function *F : Functions)
2373 if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
2374 InternalFns.push_back(F);
2375
2376 SmallPtrSet<Function *, 8> LiveInternalFns;
2377 bool FoundLiveInternal = true;
2378 while (FoundLiveInternal) {
2379 FoundLiveInternal = false;
2380 for (Function *&F : InternalFns) {
2381 if (!F)
2382 continue;
2383
2384 bool UsedAssumedInformation = false;
2385 if (checkForAllCallSites(
2386 [&](AbstractCallSite ACS) {
2387 Function *Callee = ACS.getInstruction()->getFunction();
2388 return ToBeDeletedFunctions.count(Callee) ||
2389 (Functions.count(Callee) && Callee->hasLocalLinkage() &&
2390 !LiveInternalFns.count(Callee));
2391 },
2392 *F, true, nullptr, UsedAssumedInformation)) {
2393 continue;
2394 }
2395
2396 LiveInternalFns.insert(F);
2397 F = nullptr;
2398 FoundLiveInternal = true;
2399 }
2400 }
2401
2402 for (Function *F : InternalFns)
2403 if (F)
2404 ToBeDeletedFunctions.insert(F);
2405 }
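
// Worked example (a sketch): let internal functions @f and @g only call each
// other. The call site check succeeds for both (every caller is itself a
// dead-or-candidate internal function), so neither is marked live and both
// end up in ToBeDeletedFunctions. If instead @g were also called from an
// externally visible function, the check for @g would fail, @g would be
// marked live, and the outer loop would re-run so that @f, now called from a
// live internal function, is marked live as well.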
2406
2407 ChangeStatus Attributor::cleanupIR() {
2408 TimeTraceScope TimeScope("Attributor::cleanupIR");
2409 // Delete stuff at the end to avoid invalid references and to get a nice order.
2410 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2411 << ToBeDeletedFunctions.size() << " functions and "
2412 << ToBeDeletedBlocks.size() << " blocks and "
2413 << ToBeDeletedInsts.size() << " instructions and "
2414 << ToBeChangedValues.size() << " values and "
2415 << ToBeChangedUses.size() << " uses. To insert "
2416 << ToBeChangedToUnreachableInsts.size()
2417 << " unreachables.\n"
2418 << "Preserve manifest added " << ManifestAddedBlocks.size()
2419 << " blocks\n");
2420
2421 SmallVector<WeakTrackingVH, 32> DeadInsts;
2422 SmallVector<Instruction *, 32> TerminatorsToFold;
2423
2424 auto ReplaceUse = [&](Use *U, Value *NewV) {
2425 Value *OldV = U->get();
2426
2427 // If we plan to replace NewV we need to update it at this point.
2428 do {
2429 const auto &Entry = ToBeChangedValues.lookup(NewV);
2430 if (!get<0>(Entry))
2431 break;
2432 NewV = get<0>(Entry);
2433 } while (true);
2434
2435 Instruction *I = dyn_cast<Instruction>(U->getUser());
2436 assert((!I || isRunOn(*I->getFunction())) &&
2437 "Cannot replace an instruction outside the current SCC!");
2438
2439 // Do not replace uses in returns if the value is a must-tail call we will
2440 // not delete.
2441 if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2442 if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2443 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2444 return;
2445 // If we rewrite a return and the new value is not an argument, strip the
2446 // `returned` attribute as it is wrong now.
2447 if (!isa<Argument>(NewV))
2448 for (auto &Arg : RI->getFunction()->args())
2449 Arg.removeAttr(Attribute::Returned);
2450 }
2451
2452 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2453 << " instead of " << *OldV << "\n");
2454 U->set(NewV);
2455
2456 if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2457 CGModifiedFunctions.insert(I->getFunction());
2458 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2459 isInstructionTriviallyDead(I))
2460 DeadInsts.push_back(I);
2461 }
2462 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2463 auto *CB = cast<CallBase>(U->getUser());
2464 if (CB->isArgOperand(U)) {
2465 unsigned Idx = CB->getArgOperandNo(U);
2466 CB->removeParamAttr(Idx, Attribute::NoUndef);
2467 auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
2468 if (Callee && Callee->arg_size() > Idx)
2469 Callee->removeParamAttr(Idx, Attribute::NoUndef);
2470 }
2471 }
2472 if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
2473 Instruction *UserI = cast<Instruction>(U->getUser());
2474 if (isa<UndefValue>(NewV)) {
2475 ToBeChangedToUnreachableInsts.insert(UserI);
2476 } else {
2477 TerminatorsToFold.push_back(UserI);
2478 }
2479 }
2480 };
2481
2482 for (auto &It : ToBeChangedUses) {
2483 Use *U = It.first;
2484 Value *NewV = It.second;
2485 ReplaceUse(U, NewV);
2486 }
2487
2488 SmallVector<Use *, 4> Uses;
2489 for (auto &It : ToBeChangedValues) {
2490 Value *OldV = It.first;
2491 auto [NewV, Done] = It.second;
2492 Uses.clear();
2493 for (auto &U : OldV->uses())
2494 if (Done || !U.getUser()->isDroppable())
2495 Uses.push_back(&U);
2496 for (Use *U : Uses) {
2497 if (auto *I = dyn_cast<Instruction>(U->getUser()))
2498 if (!isRunOn(*I->getFunction()))
2499 continue;
2500 ReplaceUse(U, NewV);
2501 }
2502 }
2503
2504 for (const auto &V : InvokeWithDeadSuccessor)
2505 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2506 assert(isRunOn(*II->getFunction()) &&
2507 "Cannot replace an invoke outside the current SCC!");
2508 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2509 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2510 bool Invoke2CallAllowed =
2511 !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
2512 assert((UnwindBBIsDead || NormalBBIsDead) &&
2513 "Invoke does not have dead successors!");
2514 BasicBlock *BB = II->getParent();
2515 BasicBlock *NormalDestBB = II->getNormalDest();
2516 if (UnwindBBIsDead) {
2517 Instruction *NormalNextIP = &NormalDestBB->front();
2518 if (Invoke2CallAllowed) {
2519 changeToCall(II);
2520 NormalNextIP = BB->getTerminator();
2521 }
2522 if (NormalBBIsDead)
2523 ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2524 } else {
2525 assert(NormalBBIsDead && "Broken invariant!");
2526 if (!NormalDestBB->getUniquePredecessor())
2527 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2528 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2529 }
2530 }
2531 for (Instruction *I : TerminatorsToFold) {
2532 assert(isRunOn(*I->getFunction()) &&
2533 "Cannot replace a terminator outside the current SCC!");
2534 CGModifiedFunctions.insert(I->getFunction());
2535 ConstantFoldTerminator(I->getParent());
2536 }
2537 for (const auto &V : ToBeChangedToUnreachableInsts)
2538 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2539 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2540 << "\n");
2541 assert(isRunOn(*I->getFunction()) &&
2542 "Cannot replace an instruction outside the current SCC!");
2543 CGModifiedFunctions.insert(I->getFunction());
2544 changeToUnreachable(I);
2545 }
2546
2547 for (const auto &V : ToBeDeletedInsts) {
2548 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2549 assert((!isa<CallBase>(I) || isa<IntrinsicInst>(I) ||
2550 isRunOn(*I->getFunction())) &&
2551 "Cannot delete an instruction outside the current SCC!");
2552 I->dropDroppableUses();
2553 CGModifiedFunctions.insert(I->getFunction());
2554 if (!I->getType()->isVoidTy())
2555 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2556 if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
2557 DeadInsts.push_back(I);
2558 else
2559 I->eraseFromParent();
2560 }
2561 }
2562
2563 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2564
2565 LLVM_DEBUG({
2566 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2567 for (auto &I : DeadInsts)
2568 if (I)
2569 dbgs() << " - " << *I << "\n";
2570 });
2571
2572 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
2573
2574 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2575 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2576 ToBeDeletedBBs.reserve(NumDeadBlocks);
2577 for (BasicBlock *BB : ToBeDeletedBlocks) {
2578 assert(isRunOn(*BB->getParent()) &&
2579 "Cannot delete a block outside the current SCC!");
2580 CGModifiedFunctions.insert(BB->getParent());
2581 // Do not delete BBs added during manifests of AAs.
2582 if (ManifestAddedBlocks.contains(BB))
2583 continue;
2584 ToBeDeletedBBs.push_back(BB);
2585 }
2586 // Actually, we do not delete the blocks but squash them into a single
2587 // unreachable instruction; untangling branches that jump here is something
2588 // we need to do in a more generic way.
2589 detachDeadBlocks(ToBeDeletedBBs, nullptr);
2590 }
2591
2592 identifyDeadInternalFunctions();
2593
2594 // Rewrite the functions as requested during manifest.
2595 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2596
2597 for (Function *Fn : CGModifiedFunctions)
2598 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2599 Configuration.CGUpdater.reanalyzeFunction(*Fn);
2600
2601 for (Function *Fn : ToBeDeletedFunctions) {
2602 if (!Functions.count(Fn))
2603 continue;
2604 Configuration.CGUpdater.removeFunction(*Fn);
2605 }
2606
2607 if (!ToBeChangedUses.empty())
2608 ManifestChange = ChangeStatus::CHANGED;
2609
2610 if (!ToBeChangedToUnreachableInsts.empty())
2611 ManifestChange = ChangeStatus::CHANGED;
2612
2613 if (!ToBeDeletedFunctions.empty())
2614 ManifestChange = ChangeStatus::CHANGED;
2615
2616 if (!ToBeDeletedBlocks.empty())
2617 ManifestChange = ChangeStatus::CHANGED;
2618
2619 if (!ToBeDeletedInsts.empty())
2620 ManifestChange = ChangeStatus::CHANGED;
2621
2622 if (!InvokeWithDeadSuccessor.empty())
2623 ManifestChange = ChangeStatus::CHANGED;
2624
2625 if (!DeadInsts.empty())
2626 ManifestChange = ChangeStatus::CHANGED;
2627
2628 NumFnDeleted += ToBeDeletedFunctions.size();
2629
2630 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2631 << " functions after manifest.\n");
2632
2633 #ifdef EXPENSIVE_CHECKS
2634 for (Function *F : Functions) {
2635 if (ToBeDeletedFunctions.count(F))
2636 continue;
2637 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2638 }
2639 #endif
2640
2641 return ManifestChange;
2642 }
2643
2644 ChangeStatus Attributor::run() {
2645 TimeTraceScope TimeScope("Attributor::run");
2646 AttributorCallGraph ACallGraph(*this);
2647
2648 if (PrintCallGraph)
2649 ACallGraph.populateAll();
2650
2651 Phase = AttributorPhase::UPDATE;
2652 runTillFixpoint();
2653
2654 // Dump graphs on demand.
2655 if (DumpDepGraph)
2656 DG.dumpGraph();
2657
2658 if (ViewDepGraph)
2659 DG.viewGraph();
2660
2661 if (PrintDependencies)
2662 DG.print();
2663
2664 Phase = AttributorPhase::MANIFEST;
2665 ChangeStatus ManifestChange = manifestAttributes();
2666
2667 Phase = AttributorPhase::CLEANUP;
2668 ChangeStatus CleanupChange = cleanupIR();
2669
2670 if (PrintCallGraph)
2671 ACallGraph.print();
2672
2673 return ManifestChange | CleanupChange;
2674 }
2675
2676 ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2677 TimeTraceScope TimeScope("updateAA", [&]() {
2678 return AA.getName().str() +
2679 std::to_string(AA.getIRPosition().getPositionKind());
2680 });
2681 assert(Phase == AttributorPhase::UPDATE &&
2682 "We can update AA only in the update stage!");
2683
2684 // Use a new dependence vector for this update.
2685 DependenceVector DV;
2686 DependenceStack.push_back(&DV);
2687
2688 auto &AAState = AA.getState();
2689 ChangeStatus CS = ChangeStatus::UNCHANGED;
2690 bool UsedAssumedInformation = false;
2691 if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2692 /* CheckBBLivenessOnly */ true))
2693 CS = AA.update(*this);
2694
2695 if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2696 // If the AA did not rely on outside information but changed, we run it
2697 // again to see if it found a fixpoint. Most AAs do but we don't require
2698 // them to. Hence, it might take the AA multiple iterations to get to a
2699 // fixpoint even if it does not rely on outside information, which is fine.
2700 ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
2701 if (CS == ChangeStatus::CHANGED)
2702 RerunCS = AA.update(*this);
2703
2704 // If the attribute did not change during the run or rerun, and it still did
2705 // not query any non-fix information, the state will not change and we can
2706 // indicate that right at this point.
2707 if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2708 AAState.indicateOptimisticFixpoint();
2709 }
2710
2711 if (!AAState.isAtFixpoint())
2712 rememberDependences();
2713
2714 // Verify the stack was used properly, that is, we pop the dependence vector
2715 // we put there earlier.
2716 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2717 (void)PoppedDV;
2718 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2719
2720 return CS;
2721 }
2722
2723 void Attributor::createShallowWrapper(Function &F) {
2724 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2725
2726 Module &M = *F.getParent();
2727 LLVMContext &Ctx = M.getContext();
2728 FunctionType *FnTy = F.getFunctionType();
2729
2730 Function *Wrapper =
2731 Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2732 F.setName(""); // Make the wrapped function anonymous.
2733 M.getFunctionList().insert(F.getIterator(), Wrapper);
2734
2735 F.setLinkage(GlobalValue::InternalLinkage);
2736
2737 F.replaceAllUsesWith(Wrapper);
2738 assert(F.use_empty() && "Uses remained after wrapper was created!");
2739
2740 // Move the COMDAT section to the wrapper.
2741 // TODO: Check if we need to keep it for F as well.
2742 Wrapper->setComdat(F.getComdat());
2743 F.setComdat(nullptr);
2744
2745 // Copy all metadata and attributes but keep them on F as well.
2746 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2747 F.getAllMetadata(MDs);
2748 for (auto MDIt : MDs)
2749 Wrapper->addMetadata(MDIt.first, *MDIt.second);
2750 Wrapper->setAttributes(F.getAttributes());
2751
2752 // Create the call in the wrapper.
2753 BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2754
2755 SmallVector<Value *, 8> Args;
2756 Argument *FArgIt = F.arg_begin();
2757 for (Argument &Arg : Wrapper->args()) {
2758 Args.push_back(&Arg);
2759 Arg.setName((FArgIt++)->getName());
2760 }
2761
2762 CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2763 CI->setTailCall(true);
2764 CI->addFnAttr(Attribute::NoInline);
2765 ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2766
2767 NumFnShallowWrappersCreated++;
2768 }
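
// Shape of the result (illustrative IR; names, metadata, and the noinline
// call attribute are elided):
//
//   ; before
//   define weak void @foo(i32 %x) { ... }
//
//   ; after: @foo becomes a wrapper, and the original function is made
//   ; anonymous and internal, tail-called with all arguments forwarded.
//   define weak void @foo(i32 %x) {
//   entry:
//     tail call void @0(i32 %x)
//     ret void
//   }
//   define internal void @0(i32 %x) { ... }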
2769
2770 bool Attributor::isInternalizable(Function &F) {
2771 if (F.isDeclaration() || F.hasLocalLinkage() ||
2772 GlobalValue::isInterposableLinkage(F.getLinkage()))
2773 return false;
2774 return true;
2775 }
2776
2777 Function *Attributor::internalizeFunction(Function &F, bool Force) {
2778 if (!AllowDeepWrapper && !Force)
2779 return nullptr;
2780 if (!isInternalizable(F))
2781 return nullptr;
2782
2783 SmallPtrSet<Function *, 2> FnSet = {&F};
2784 DenseMap<Function *, Function *> InternalizedFns;
2785 internalizeFunctions(FnSet, InternalizedFns);
2786
2787 return InternalizedFns[&F];
2788 }
2789
2790 bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2791 DenseMap<Function *, Function *> &FnMap) {
2792 for (Function *F : FnSet)
2793 if (!Attributor::isInternalizable(*F))
2794 return false;
2795
2796 FnMap.clear();
2797 // Generate the internalized version of each function.
2798 for (Function *F : FnSet) {
2799 Module &M = *F->getParent();
2800 FunctionType *FnTy = F->getFunctionType();
2801
2802 // Create a copy of the current function
2803 Function *Copied =
2804 Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2805 F->getName() + ".internalized");
2806 ValueToValueMapTy VMap;
2807 auto *NewFArgIt = Copied->arg_begin();
2808 for (auto &Arg : F->args()) {
2809 auto ArgName = Arg.getName();
2810 NewFArgIt->setName(ArgName);
2811 VMap[&Arg] = &(*NewFArgIt++);
2812 }
2813 SmallVector<ReturnInst *, 8> Returns;
2814
2815 // Copy the body of the original function to the new one
2816 CloneFunctionInto(Copied, F, VMap,
2817 CloneFunctionChangeType::LocalChangesOnly, Returns);
2818
2819 // Set the linkage and visibility late as CloneFunctionInto has some
2820 // implicit requirements.
2821 Copied->setVisibility(GlobalValue::DefaultVisibility);
2822 Copied->setLinkage(GlobalValue::PrivateLinkage);
2823
2824 // Copy metadata
2825 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2826 F->getAllMetadata(MDs);
2827 for (auto MDIt : MDs)
2828 if (!Copied->hasMetadata())
2829 Copied->addMetadata(MDIt.first, *MDIt.second);
2830
2831 M.getFunctionList().insert(F->getIterator(), Copied);
2832 Copied->setDSOLocal(true);
2833 FnMap[F] = Copied;
2834 }
2835
2836 // Replace all uses of the old function with the new internalized function
2837 // unless the caller is a function that was just internalized.
2838 for (Function *F : FnSet) {
2839 auto &InternalizedFn = FnMap[F];
2840 auto IsNotInternalized = [&](Use &U) -> bool {
2841 if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2842 return !FnMap.lookup(CB->getCaller());
2843 return false;
2844 };
2845 F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2846 }
2847
2848 return true;
2849 }
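
// Effect (illustrative IR): an externally visible @foo gets a private copy,
// and call sites outside the internalized set are redirected to it.
//
//   ; before
//   define void @foo() { ... }
//
//   ; after
//   define void @foo() { ... }                       ; kept for external users
//   define private void @foo.internalized() { ... }  ; analyzed copy
//
// Non-call uses (e.g. address takers) and call sites whose caller is itself
// one of the functions being internalized keep referring to the original.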
2850
2851 bool Attributor::isValidFunctionSignatureRewrite(
2852 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2853
2854 if (!Configuration.RewriteSignatures)
2855 return false;
2856
2857 Function *Fn = Arg.getParent();
2858 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2859 // Forbid call sites that cast the function return type. If we need to
2860 // rewrite these functions we need to re-create a cast for the new call
2861 // site (if the old one had uses).
2862 if (!ACS.getCalledFunction() ||
2863 ACS.getInstruction()->getType() !=
2864 ACS.getCalledFunction()->getReturnType())
2865 return false;
2866 if (cast<CallBase>(ACS.getInstruction())->getCalledOperand()->getType() !=
2867 Fn->getType())
2868 return false;
2869 if (ACS.getNumArgOperands() != Fn->arg_size())
2870 return false;
2871 // Forbid must-tail calls for now.
2872 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2873 };
2874
2875 // Avoid var-arg functions for now.
2876 if (Fn->isVarArg()) {
2877 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2878 return false;
2879 }
2880
2881 // Avoid functions with complicated argument passing semantics.
2882 AttributeList FnAttributeList = Fn->getAttributes();
2883 if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2884 FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2885 FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2886 FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2887 LLVM_DEBUG(
2888 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2889 return false;
2890 }
2891
2892 // Avoid callbacks for now.
2893 bool UsedAssumedInformation = false;
2894 if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2895 UsedAssumedInformation,
2896 /* CheckPotentiallyDead */ true)) {
2897 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2898 return false;
2899 }
2900
2901 auto InstPred = [](Instruction &I) {
2902 if (auto *CI = dyn_cast<CallInst>(&I))
2903 return !CI->isMustTailCall();
2904 return true;
2905 };
2906
2907 // Forbid must-tail calls for now.
2908 // TODO:
2909 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2910 if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2911 nullptr, {Instruction::Call},
2912 UsedAssumedInformation)) {
2913 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2914 return false;
2915 }
2916
2917 return true;
2918 }
2919
2920 bool Attributor::registerFunctionSignatureRewrite(
2921 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2922 ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
2923 ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
2924 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2925 << Arg.getParent()->getName() << " with "
2926 << ReplacementTypes.size() << " replacements\n");
2927 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2928 "Cannot register an invalid rewrite");
2929
2930 Function *Fn = Arg.getParent();
2931 SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2932 ArgumentReplacementMap[Fn];
2933 if (ARIs.empty())
2934 ARIs.resize(Fn->arg_size());
2935
2936 // If we have a replacement already with less than or equal new arguments,
2937 // ignore this request.
2938 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2939 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2940 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2941 return false;
2942 }
2943
2944 // If we have a replacement already but we like the new one better, delete
2945 // the old.
2946 ARI.reset();
2947
2948 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2949 << Arg.getParent()->getName() << " with "
2950 << ReplacementTypes.size() << " replacements\n");
2951
2952 // Remember the replacement.
2953 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2954 std::move(CalleeRepairCB),
2955 std::move(ACSRepairCB)));
2956
2957 return true;
2958 }
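
// Usage sketch (illustrative, with assumed client code, not taken from this
// file): the simplest rewrite is dropping a dead argument by registering an
// empty replacement type list. Both repair callbacks can then be no-ops
// because rewriteFunctionSignatures() below replaces remaining uses of the
// old argument with poison.
//
//   if (A.isValidFunctionSignatureRewrite(DeadArg, /* ReplacementTypes */ {}))
//     A.registerFunctionSignatureRewrite(
//         DeadArg, /* ReplacementTypes */ {},
//         [](const Attributor::ArgumentReplacementInfo &, Function &,
//            Function::arg_iterator) {},
//         [](const Attributor::ArgumentReplacementInfo &, AbstractCallSite,
//            SmallVectorImpl<Value *> &) {});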

bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
  bool Result = true;
#ifndef NDEBUG
  if (SeedAllowList.size() != 0)
    Result = llvm::is_contained(SeedAllowList, AA.getName());
  Function *Fn = AA.getAnchorScope();
  if (FunctionSeedAllowList.size() != 0 && Fn)
    Result &= llvm::is_contained(FunctionSeedAllowList, Fn->getName());
#endif
  return Result;
}
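
// Debug-build usage sketch: the allow lists consulted above are populated
// from command-line options defined earlier in this file (the flag names
// below are assumptions based on those definitions). They make it possible
// to bisect miscompiles by seeding only selected AAs in selected functions,
// e.g.
//
//   opt -passes=attributor -attributor-seed-allow-list=AANoUnwind \
//       -attributor-function-seed-allow-list=main -S in.ll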

ChangeStatus Attributor::rewriteFunctionSignatures(
    SmallSetVector<Function *, 8> &ModifiedFns) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  for (auto &It : ArgumentReplacementMap) {
    Function *OldFn = It.getFirst();

    // Deleted functions do not require rewrites.
    if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
      continue;

    const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
        It.getSecond();
    assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");

    SmallVector<Type *, 16> NewArgumentTypes;
    SmallVector<AttributeSet, 16> NewArgumentAttributes;

    // Collect replacement argument types and copy over existing attributes.
    AttributeList OldFnAttributeList = OldFn->getAttributes();
    for (Argument &Arg : OldFn->args()) {
      if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
              ARIs[Arg.getArgNo()]) {
        NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
                                ARI->ReplacementTypes.end());
        NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
                                     AttributeSet());
      } else {
        NewArgumentTypes.push_back(Arg.getType());
        NewArgumentAttributes.push_back(
            OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
      }
    }

    uint64_t LargestVectorWidth = 0;
    for (auto *I : NewArgumentTypes)
      if (auto *VT = dyn_cast<llvm::VectorType>(I))
        LargestVectorWidth =
            std::max(LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinValue());

    FunctionType *OldFnTy = OldFn->getFunctionType();
    Type *RetTy = OldFnTy->getReturnType();

    // Construct the new function type using the new argument types.
    FunctionType *NewFnTy =
        FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());

    LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
                      << "' from " << *OldFn->getFunctionType() << " to "
                      << *NewFnTy << "\n");

    // Create the new function body and insert it into the module.
    Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
                                       OldFn->getAddressSpace(), "");
    Functions.insert(NewFn);
    OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
    NewFn->takeName(OldFn);
    NewFn->copyAttributesFrom(OldFn);

    // Patch the pointer to the LLVM function in the debug info descriptor.
    NewFn->setSubprogram(OldFn->getSubprogram());
    OldFn->setSubprogram(nullptr);

    // Recompute the parameter attribute list based on the new arguments for
    // the function.
    LLVMContext &Ctx = OldFn->getContext();
    NewFn->setAttributes(AttributeList::get(
        Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
        NewArgumentAttributes));
    AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);

    // Remove argmem from the memory effects if we have no more pointer
    // arguments, or they are readnone.
    MemoryEffects ME = NewFn->getMemoryEffects();
    int ArgNo = -1;
    if (ME.doesAccessArgPointees() && all_of(NewArgumentTypes, [&](Type *T) {
          ++ArgNo;
          return !T->isPtrOrPtrVectorTy() ||
                 NewFn->hasParamAttribute(ArgNo, Attribute::ReadNone);
        })) {
      NewFn->setMemoryEffects(ME - MemoryEffects::argMemOnly());
    }

    // Since we have now created the new function, splice the body of the old
    // function right into the new function, leaving the old rotting hulk of
    // the function empty.
    NewFn->splice(NewFn->begin(), OldFn);

    // Set of all "call-like" instructions that invoke the old function mapped
    // to their new replacements.
    SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;

    // Callback to create a new "call-like" instruction for a given one.
    auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
      CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
      const AttributeList &OldCallAttributeList = OldCB->getAttributes();

      // Collect the new argument operands for the replacement call site.
      SmallVector<Value *, 16> NewArgOperands;
      SmallVector<AttributeSet, 16> NewArgOperandAttributes;
      for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
        unsigned NewFirstArgNum = NewArgOperands.size();
        (void)NewFirstArgNum; // Only used inside assert.
        if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
                ARIs[OldArgNum]) {
          if (ARI->ACSRepairCB)
            ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
          assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
                     NewArgOperands.size() &&
                 "ACS repair callback did not provide as many operands as new "
                 "types were registered!");
          // TODO: Expose the attribute set to the ACS repair callback.
          NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
                                         AttributeSet());
        } else {
          NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
          NewArgOperandAttributes.push_back(
              OldCallAttributeList.getParamAttrs(OldArgNum));
        }
      }

      assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
             "Mismatch # argument operands vs. # argument operand attributes!");
      assert(NewArgOperands.size() == NewFn->arg_size() &&
             "Mismatch # argument operands vs. # function arguments!");

      SmallVector<OperandBundleDef, 4> OperandBundleDefs;
      OldCB->getOperandBundlesAsDefs(OperandBundleDefs);

      // Create a new call or invoke instruction to replace the old one.
      CallBase *NewCB;
      if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
        NewCB = InvokeInst::Create(NewFn, II->getNormalDest(),
                                   II->getUnwindDest(), NewArgOperands,
                                   OperandBundleDefs, "", OldCB->getIterator());
      } else {
        auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
                                       "", OldCB->getIterator());
        NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
        NewCB = NewCI;
      }

      // Copy over various properties and the new attributes.
      NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
      NewCB->setCallingConv(OldCB->getCallingConv());
      NewCB->takeName(OldCB);
      NewCB->setAttributes(AttributeList::get(
          Ctx, OldCallAttributeList.getFnAttrs(),
          OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));

      AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
                                                    LargestVectorWidth);

      CallSitePairs.push_back({OldCB, NewCB});
      return true;
    };

    // Use the CallSiteReplacementCreator to create replacement call sites.
    bool UsedAssumedInformation = false;
    bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
                                        true, nullptr, UsedAssumedInformation,
                                        /* CheckPotentiallyDead */ true);
    (void)Success;
    assert(Success && "Assumed call site replacement to succeed!");

    // Rewire the arguments.
    Argument *OldFnArgIt = OldFn->arg_begin();
    Argument *NewFnArgIt = NewFn->arg_begin();
    for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
         ++OldArgNum, ++OldFnArgIt) {
      if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
              ARIs[OldArgNum]) {
        if (ARI->CalleeRepairCB)
          ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
        if (ARI->ReplacementTypes.empty())
          OldFnArgIt->replaceAllUsesWith(
              PoisonValue::get(OldFnArgIt->getType()));
        NewFnArgIt += ARI->ReplacementTypes.size();
      } else {
        NewFnArgIt->takeName(&*OldFnArgIt);
        OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
        ++NewFnArgIt;
      }
    }

    // Eliminate the instructions *after* we visited all of them.
    for (auto &CallSitePair : CallSitePairs) {
      CallBase &OldCB = *CallSitePair.first;
      CallBase &NewCB = *CallSitePair.second;
      assert(OldCB.getType() == NewCB.getType() &&
             "Cannot handle call sites with different types!");
      ModifiedFns.insert(OldCB.getFunction());
      OldCB.replaceAllUsesWith(&NewCB);
      OldCB.eraseFromParent();
    }

    // Replace the function in the call graph (if any).
    Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);

    // If the old function was modified and needed to be reanalyzed, the new
    // one needs to be now.
    if (ModifiedFns.remove(OldFn))
      ModifiedFns.insert(NewFn);

    Changed = ChangeStatus::CHANGED;
  }

  return Changed;
}

void InformationCache::initializeInformationCache(const Function &CF,
                                                  FunctionInfo &FI) {
  // As we do not modify the function here we can remove the const
  // without breaking implicit assumptions. At the end of the day, we could
  // initialize the cache eagerly which would look the same to the users.
  Function &F = const_cast<Function &>(CF);

  FI.IsKernel = F.hasFnAttribute("kernel");

  // Walk all instructions to find interesting instructions that might be
  // queried by abstract attributes during their initialization or update.
  // This has to happen before we create attributes.

  DenseMap<const Value *, std::optional<short>> AssumeUsesMap;

  // Add \p V to the assume uses map which tracks the number of uses outside
  // of "visited" assumes. If no outside uses are left the value is added to
  // the assume-only value set.
  auto AddToAssumeUsesMap = [&](const Value &V) -> void {
    SmallVector<const Instruction *> Worklist;
    if (auto *I = dyn_cast<Instruction>(&V))
      Worklist.push_back(I);
    while (!Worklist.empty()) {
      const Instruction *I = Worklist.pop_back_val();
      std::optional<short> &NumUses = AssumeUsesMap[I];
      if (!NumUses)
        NumUses = I->getNumUses();
      NumUses = *NumUses - /* this assume */ 1;
      if (*NumUses != 0)
        continue;
      AssumeOnlyValues.insert(I);
      for (const Value *Op : I->operands())
        if (auto *OpI = dyn_cast<Instruction>(Op))
          Worklist.push_back(OpI);
    }
  };
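
  // For illustration (assumed IR, not taken from a test): given
  //
  //   %c = icmp eq i32 %x, 0
  //   call void @llvm.assume(i1 %c)
  //
  // the icmp's only user is the assume, so once the assume is visited the
  // icmp (and, transitively, any of its operands that become use-free) is
  // added to AssumeOnlyValues.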

  for (Instruction &I : instructions(&F)) {
    bool IsInterestingOpcode = false;

    // To allow easy access to all instructions in a function with a given
    // opcode we store them in the InfoCache. As not all opcodes are
    // interesting to concrete attributes we only cache the ones that are, as
    // identified in the following switch.
    // Note: There are no concrete attributes now so this is initially empty.
    switch (I.getOpcode()) {
    default:
      assert(!isa<CallBase>(&I) &&
             "New call base instruction type needs to be known in the "
             "Attributor.");
      break;
    case Instruction::Call:
      // Calls are interesting on their own, additionally:
      // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
      // For `must-tail` calls we remember the caller and callee.
      if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
        AssumeOnlyValues.insert(Assume);
        fillMapFromAssume(*Assume, KnowledgeMap);
        AddToAssumeUsesMap(*Assume->getArgOperand(0));
      } else if (cast<CallInst>(I).isMustTailCall()) {
        FI.ContainsMustTailCall = true;
        if (auto *Callee = dyn_cast_if_present<Function>(
                cast<CallInst>(I).getCalledOperand()))
          getFunctionInfo(*Callee).CalledViaMustTail = true;
      }
      [[fallthrough]];
    case Instruction::CallBr:
    case Instruction::Invoke:
    case Instruction::CleanupRet:
    case Instruction::CatchSwitch:
    case Instruction::AtomicRMW:
    case Instruction::AtomicCmpXchg:
    case Instruction::Br:
    case Instruction::Resume:
    case Instruction::Ret:
    case Instruction::Load:
      // The alignment of a pointer is interesting for loads.
    case Instruction::Store:
      // The alignment of a pointer is interesting for stores.
    case Instruction::Alloca:
    case Instruction::AddrSpaceCast:
      IsInterestingOpcode = true;
    }
    if (IsInterestingOpcode) {
      auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
      if (!Insts)
        Insts = new (Allocator) InstructionVectorTy();
      Insts->push_back(&I);
    }
    if (I.mayReadOrWriteMemory())
      FI.RWInsts.push_back(&I);
  }

  if (F.hasFnAttribute(Attribute::AlwaysInline) &&
      isInlineViable(F).isSuccess())
    InlineableFunctions.insert(&F);
}

InformationCache::FunctionInfo::~FunctionInfo() {
  // The instruction vectors are allocated using a BumpPtrAllocator, so we
  // need to destroy them manually.
  for (auto &It : OpcodeInstMap)
    It.getSecond()->~InstructionVectorTy();
}

ArrayRef<Function *>
InformationCache::getIndirectlyCallableFunctions(Attributor &A) const {
  assert(A.isClosedWorldModule() && "Cannot see all indirect callees!");
  return IndirectlyCallableFunctions;
}

std::optional<unsigned> InformationCache::getFlatAddressSpace() const {
  if (TargetTriple.isGPU())
    return 0;
  return std::nullopt;
}

void Attributor::recordDependence(const AbstractAttribute &FromAA,
                                  const AbstractAttribute &ToAA,
                                  DepClassTy DepClass) {
  if (DepClass == DepClassTy::NONE)
    return;
  // If we are outside of an update, thus before the actual fixpoint iteration
  // started (= when we create AAs), we do not track dependences because we
  // will put all AAs into the initial worklist anyway.
  if (DependenceStack.empty())
    return;
  if (FromAA.getState().isAtFixpoint())
    return;
  DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
}
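
// For illustration (a sketch of typical client code, not from this file):
// dependences are usually recorded indirectly through queries made during an
// AA update, e.g.
//
//   const auto *NoSyncAA =
//       A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
//
// which ends up here with the queried AA as FromAA and the querying AA
// (*this) as ToAA, so that the querying AA is re-updated whenever the queried
// information changes.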

void Attributor::rememberDependences() {
  assert(!DependenceStack.empty() && "No dependences to remember!");

  for (DepInfo &DI : *DependenceStack.back()) {
    assert((DI.DepClass == DepClassTy::REQUIRED ||
            DI.DepClass == DepClassTy::OPTIONAL) &&
           "Expected required or optional dependence (1 bit)!");
    auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
    DepAAs.insert(AbstractAttribute::DepTy(
        const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
  }
}

template <Attribute::AttrKind AK, typename AAType>
void Attributor::checkAndQueryIRAttr(const IRPosition &IRP, AttributeSet Attrs,
                                     bool SkipHasAttrCheck) {
  bool IsKnown;
  if (SkipHasAttrCheck || !Attrs.hasAttribute(AK))
    if (!Configuration.Allowed || Configuration.Allowed->count(&AAType::ID))
      if (!AA::hasAssumedIRAttr<AK>(*this, nullptr, IRP, DepClassTy::NONE,
                                    IsKnown))
        getOrCreateAAFor<AAType>(IRP);
}

void Attributor::identifyDefaultAbstractAttributes(Function &F) {
  if (!VisitedFunctions.insert(&F).second)
    return;
  if (F.isDeclaration())
    return;

  // In non-module runs we need to look at the call sites of a function to
  // determine if it is part of a must-tail call edge. This will influence
  // what attributes we can derive.
  InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
  if (!isModulePass() && !FI.CalledViaMustTail) {
    for (const Use &U : F.uses())
      if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
        if (CB->isCallee(&U) && CB->isMustTailCall())
          FI.CalledViaMustTail = true;
  }

  IRPosition FPos = IRPosition::function(F);
  bool IsIPOAmendable = isFunctionIPOAmendable(F);
  auto Attrs = F.getAttributes();
  auto FnAttrs = Attrs.getFnAttrs();

  // Check for dead BasicBlocks in every function.
  // We need dead instruction detection because we do not want to deal with
  // broken IR in which SSA rules do not apply.
  getOrCreateAAFor<AAIsDead>(FPos);

  // Every function might contain instructions that cause "undefined
  // behavior".
  getOrCreateAAFor<AAUndefinedBehavior>(FPos);

  // Every function might be applicable for Heap-To-Stack conversion.
  if (EnableHeapToStack)
    getOrCreateAAFor<AAHeapToStack>(FPos);

  // Every function might be "must-progress".
  checkAndQueryIRAttr<Attribute::MustProgress, AAMustProgress>(FPos, FnAttrs);

  // Every function might be "no-free".
  checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);

  // Every function might be "will-return".
  checkAndQueryIRAttr<Attribute::WillReturn, AAWillReturn>(FPos, FnAttrs);

  // Every function might be marked "nosync".
  checkAndQueryIRAttr<Attribute::NoSync, AANoSync>(FPos, FnAttrs);

  // Everything that is visible from the outside (function, argument, and
  // return positions) cannot be changed if the function is not IPO amendable.
  // We can however analyze the code inside.
  if (IsIPOAmendable) {

    // Every function can be nounwind.
    checkAndQueryIRAttr<Attribute::NoUnwind, AANoUnwind>(FPos, FnAttrs);

    // Every function might be "no-return".
    checkAndQueryIRAttr<Attribute::NoReturn, AANoReturn>(FPos, FnAttrs);

    // Every function might be "no-recurse".
    checkAndQueryIRAttr<Attribute::NoRecurse, AANoRecurse>(FPos, FnAttrs);

    // Every function can be "non-convergent".
    if (Attrs.hasFnAttr(Attribute::Convergent))
      getOrCreateAAFor<AANonConvergent>(FPos);

    // Every function might be "readnone/readonly/writeonly/...".
    getOrCreateAAFor<AAMemoryBehavior>(FPos);

    // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
    getOrCreateAAFor<AAMemoryLocation>(FPos);

    // Every function can track active assumptions.
    getOrCreateAAFor<AAAssumptionInfo>(FPos);

    // If we're not using a dynamic mode for float, there's nothing worthwhile
    // to infer. This misses the edge case of denormal-fp-math="dynamic" and
    // denormal-fp-math-f32=something, but that likely has no real world use.
    DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
    if (Mode.Input == DenormalMode::Dynamic ||
        Mode.Output == DenormalMode::Dynamic)
      getOrCreateAAFor<AADenormalFPMath>(FPos);

    // Return attributes are only appropriate if the return type is non void.
    Type *ReturnType = F.getReturnType();
    if (!ReturnType->isVoidTy()) {
      IRPosition RetPos = IRPosition::returned(F);
      AttributeSet RetAttrs = Attrs.getRetAttrs();

      // Every returned value might be dead.
      getOrCreateAAFor<AAIsDead>(RetPos);

      // Every returned value might be simplified.
      bool UsedAssumedInformation = false;
      getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
                           AA::Intraprocedural);

      // Every returned value might be marked noundef.
      checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(RetPos, RetAttrs);

      if (ReturnType->isPointerTy()) {

        // Every function with pointer return type might be marked align.
        getOrCreateAAFor<AAAlign>(RetPos);

        // Every function with pointer return type might be marked nonnull.
        checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(RetPos, RetAttrs);

        // Every function with pointer return type might be marked noalias.
        checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(RetPos, RetAttrs);

        // Every function with pointer return type might be marked
        // dereferenceable.
        getOrCreateAAFor<AADereferenceable>(RetPos);
      } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
        getOrCreateAAFor<AANoFPClass>(RetPos);
      }
    }
  }

  for (Argument &Arg : F.args()) {
    IRPosition ArgPos = IRPosition::argument(Arg);
    auto ArgNo = Arg.getArgNo();
    AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo);

    if (!IsIPOAmendable) {
      if (Arg.getType()->isPointerTy())
        // Every argument with pointer type might be marked nofree.
        checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
      continue;
    }

    // Every argument might be simplified. We have to go through the
    // Attributor interface though as outside AAs can register custom
    // simplification callbacks.
    bool UsedAssumedInformation = false;
    getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
                         AA::Intraprocedural);

    // Every argument might be dead.
    getOrCreateAAFor<AAIsDead>(ArgPos);

    // Every argument might be marked noundef.
    checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(ArgPos, ArgAttrs);

    if (Arg.getType()->isPointerTy()) {
      // Every argument with pointer type might be marked nonnull.
      checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(ArgPos, ArgAttrs);

      // Every argument with pointer type might be marked noalias.
      checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(ArgPos, ArgAttrs);

      // Every argument with pointer type might be marked dereferenceable.
      getOrCreateAAFor<AADereferenceable>(ArgPos);

      // Every argument with pointer type might be marked align.
      getOrCreateAAFor<AAAlign>(ArgPos);

      // Every argument with pointer type might be marked captures(none).
      checkAndQueryIRAttr<Attribute::Captures, AANoCapture>(
          ArgPos, ArgAttrs, /*SkipHasAttrCheck=*/true);

      // Every argument with pointer type might be marked
      // "readnone/readonly/writeonly/...".
      getOrCreateAAFor<AAMemoryBehavior>(ArgPos);

      // Every argument with pointer type might be marked nofree.
      checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);

      // Every argument with pointer type might be privatizable (or
      // promotable).
      getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
    } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
      getOrCreateAAFor<AANoFPClass>(ArgPos);
    }
  }

  auto CallSitePred = [&](Instruction &I) -> bool {
    auto &CB = cast<CallBase>(I);
    IRPosition CBInstPos = IRPosition::inst(CB);
    IRPosition CBFnPos = IRPosition::callsite_function(CB);

    // Call sites might be dead if they do not have side effects and no live
    // users. The return value might be dead if there are no live users.
    getOrCreateAAFor<AAIsDead>(CBInstPos);

    Function *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
    // TODO: Even if the callee is not known now we might be able to simplify
    // the call/callee.
    if (!Callee) {
      getOrCreateAAFor<AAIndirectCallInfo>(CBFnPos);
      return true;
    }

    // Every call site can track active assumptions.
    getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);

    // Skip declarations except if annotations on their call sites were
    // explicitly requested.
    if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
        !Callee->hasMetadata(LLVMContext::MD_callback))
      return true;

    if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
      IRPosition CBRetPos = IRPosition::callsite_returned(CB);
      bool UsedAssumedInformation = false;
      getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
                           AA::Intraprocedural);

      if (AttributeFuncs::isNoFPClassCompatibleType(Callee->getReturnType()))
        getOrCreateAAFor<AANoFPClass>(CBInstPos);
    }

    const AttributeList &CBAttrs = CBFnPos.getAttrList();
    for (int I = 0, E = CB.arg_size(); I < E; ++I) {

      IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
      AttributeSet CBArgAttrs = CBAttrs.getParamAttrs(I);

      // Every call site argument might be dead.
      getOrCreateAAFor<AAIsDead>(CBArgPos);

      // Every call site argument might be simplified. We have to go through
      // the Attributor interface though as outside AAs can register custom
      // simplification callbacks.
      bool UsedAssumedInformation = false;
      getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
                           AA::Intraprocedural);

      // Every call site argument might be marked "noundef".
      checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(CBArgPos, CBArgAttrs);

      Type *ArgTy = CB.getArgOperand(I)->getType();

      if (!ArgTy->isPointerTy()) {
        if (AttributeFuncs::isNoFPClassCompatibleType(ArgTy))
          getOrCreateAAFor<AANoFPClass>(CBArgPos);

        continue;
      }

      // Call site argument attribute "non-null".
      checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(CBArgPos, CBArgAttrs);

      // Call site argument attribute "captures(none)".
      checkAndQueryIRAttr<Attribute::Captures, AANoCapture>(
          CBArgPos, CBArgAttrs, /*SkipHasAttrCheck=*/true);

      // Call site argument attribute "no-alias".
      checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(CBArgPos, CBArgAttrs);

      // Call site argument attribute "dereferenceable".
      getOrCreateAAFor<AADereferenceable>(CBArgPos);

      // Call site argument attribute "align".
      getOrCreateAAFor<AAAlign>(CBArgPos);

      // Call site argument attribute "readnone/readonly/writeonly/...".
      if (!CBAttrs.hasParamAttr(I, Attribute::ReadNone))
        getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);

      // Call site argument attribute "nofree".
      checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(CBArgPos, CBArgAttrs);
    }
    return true;
  };

  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
  [[maybe_unused]] bool Success;
  bool UsedAssumedInformation = false;
  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
      {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
       (unsigned)Instruction::Call},
      UsedAssumedInformation);
  assert(Success && "Expected the check call to be successful!");

  auto LoadStorePred = [&](Instruction &I) -> bool {
    if (auto *LI = dyn_cast<LoadInst>(&I)) {
      getOrCreateAAFor<AAAlign>(IRPosition::value(*LI->getPointerOperand()));
      if (SimplifyAllLoads)
        getAssumedSimplified(IRPosition::value(I), nullptr,
                             UsedAssumedInformation, AA::Intraprocedural);
      getOrCreateAAFor<AAInvariantLoadPointer>(
          IRPosition::value(*LI->getPointerOperand()));
      getOrCreateAAFor<AAAddressSpace>(
          IRPosition::value(*LI->getPointerOperand()));
    } else {
      auto &SI = cast<StoreInst>(I);
      getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
      getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
                           UsedAssumedInformation, AA::Intraprocedural);
      getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
      getOrCreateAAFor<AAAddressSpace>(
          IRPosition::value(*SI.getPointerOperand()));
    }
    return true;
  };
  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
      {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
      UsedAssumedInformation);
  assert(Success && "Expected the check call to be successful!");

  // Every alloca might be optimizable via AAAllocationInfo.
  auto AAAllocationInfoPred = [&](Instruction &I) -> bool {
    getOrCreateAAFor<AAAllocationInfo>(IRPosition::value(I));
    return true;
  };

  Success = checkForAllInstructionsImpl(
      nullptr, OpcodeInstMap, AAAllocationInfoPred, nullptr, nullptr,
      {(unsigned)Instruction::Alloca}, UsedAssumedInformation);
  assert(Success && "Expected the check call to be successful!");
}

bool Attributor::isClosedWorldModule() const {
  if (CloseWorldAssumption.getNumOccurrences())
    return CloseWorldAssumption;
  return isModulePass() && Configuration.IsClosedWorldModule;
}

/// Helpers to ease debugging through output streams and print calls.
///
///{
raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
  return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
}

raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
  switch (AP) {
  case IRPosition::IRP_INVALID:
    return OS << "inv";
  case IRPosition::IRP_FLOAT:
    return OS << "flt";
  case IRPosition::IRP_RETURNED:
    return OS << "fn_ret";
  case IRPosition::IRP_CALL_SITE_RETURNED:
    return OS << "cs_ret";
  case IRPosition::IRP_FUNCTION:
    return OS << "fn";
  case IRPosition::IRP_CALL_SITE:
    return OS << "cs";
  case IRPosition::IRP_ARGUMENT:
    return OS << "arg";
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    return OS << "cs_arg";
  }
  llvm_unreachable("Unknown attribute position!");
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
  const Value &AV = Pos.getAssociatedValue();
  OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
     << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";

  if (Pos.hasCallBaseContext())
    OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
  return OS << "}";
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
  OS << "range-state(" << S.getBitWidth() << ")<";
  S.getKnown().print(OS);
  OS << " / ";
  S.getAssumed().print(OS);
  OS << ">";

  return OS << static_cast<const AbstractState &>(S);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
  return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
  AA.print(OS);
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const PotentialConstantIntValuesState &S) {
  OS << "set-state(< {";
  if (!S.isValidState())
    OS << "full-set";
  else {
    for (const auto &It : S.getAssumedSet())
      OS << It << ", ";
    if (S.undefIsContained())
      OS << "undef ";
  }
  OS << "} >)";

  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const PotentialLLVMValuesState &S) {
  OS << "set-state(< {";
  if (!S.isValidState())
    OS << "full-set";
  else {
    for (const auto &It : S.getAssumedSet()) {
      if (auto *F = dyn_cast<Function>(It.first.getValue()))
        OS << "@" << F->getName() << "[" << int(It.second) << "], ";
      else
        OS << *It.first.getValue() << "[" << int(It.second) << "], ";
    }
    if (S.undefIsContained())
      OS << "undef ";
  }
  OS << "} >)";

  return OS;
}

void AbstractAttribute::print(Attributor *A, raw_ostream &OS) const {
  OS << "[";
  OS << getName();
  OS << "] for CtxI ";

  if (auto *I = getCtxI()) {
    OS << "'";
    I->print(OS);
    OS << "'";
  } else
    OS << "<<null inst>>";

  OS << " at position " << getIRPosition() << " with state " << getAsStr(A)
     << '\n';
}

void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
  print(OS);

  for (const auto &DepAA : Deps) {
    auto *AA = DepAA.getPointer();
    OS << " updates ";
    AA->print(OS);
  }

  OS << '\n';
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const AAPointerInfo::Access &Acc) {
  OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
  if (Acc.getLocalInst() != Acc.getRemoteInst())
    OS << " via " << *Acc.getLocalInst();
  if (Acc.getContent()) {
    if (*Acc.getContent())
      OS << " [" << **Acc.getContent() << "]";
    else
      OS << " [ <unknown> ]";
  }
  return OS;
}
///}

/// ----------------------------------------------------------------------------
///                       Pass (Manager) Boilerplate
/// ----------------------------------------------------------------------------

static bool runAttributorOnFunctions(InformationCache &InfoCache,
                                     SetVector<Function *> &Functions,
                                     AnalysisGetter &AG,
                                     CallGraphUpdater &CGUpdater,
                                     bool DeleteFns, bool IsModulePass) {
  if (Functions.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "[Attributor] Run on module with " << Functions.size()
           << " functions:\n";
    for (Function *Fn : Functions)
      dbgs() << " - " << Fn->getName() << "\n";
  });

  // Create an Attributor and an initially empty information cache that is
  // filled while we identify default attribute opportunities.
  AttributorConfig AC(CGUpdater);
  AC.IsModulePass = IsModulePass;
  AC.DeleteFns = DeleteFns;

  // Tracking callback for specialization of indirect calls.
  DenseMap<CallBase *, std::unique_ptr<SmallPtrSet<Function *, 8>>>
      IndirectCalleeTrackingMap;
  if (MaxSpecializationPerCB.getNumOccurrences()) {
    AC.IndirectCalleeSpecializationCallback =
        [&](Attributor &, const AbstractAttribute &AA, CallBase &CB,
            Function &Callee, unsigned) {
          if (MaxSpecializationPerCB == 0)
            return false;
          auto &Set = IndirectCalleeTrackingMap[&CB];
          if (!Set)
            Set = std::make_unique<SmallPtrSet<Function *, 8>>();
          if (Set->size() >= MaxSpecializationPerCB)
            return Set->contains(&Callee);
          Set->insert(&Callee);
          return true;
        };
  }

  Attributor A(Functions, InfoCache, AC);

  // Create shallow wrappers for all functions that are not IPO amendable.
  if (AllowShallowWrappers)
    for (Function *F : Functions)
      if (!A.isFunctionIPOAmendable(*F))
        Attributor::createShallowWrapper(*F);

  // Internalize non-exact functions.
  // TODO: For now we eagerly internalize functions without calculating the
  //       cost; we need a cost interface to determine whether internalizing
  //       a function is "beneficial".
  if (AllowDeepWrapper) {
    unsigned FunSize = Functions.size();
    for (unsigned u = 0; u < FunSize; u++) {
      Function *F = Functions[u];
      if (!F->isDeclaration() && !F->isDefinitionExact() && !F->use_empty() &&
          !GlobalValue::isInterposableLinkage(F->getLinkage())) {
        Function *NewF = Attributor::internalizeFunction(*F);
        assert(NewF && "Could not internalize function.");
        Functions.insert(NewF);

        // Update the call graph.
        CGUpdater.replaceFunctionWith(*F, *NewF);
        for (const Use &U : NewF->uses())
          if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
            auto *CallerF = CB->getCaller();
            CGUpdater.reanalyzeFunction(*CallerF);
          }
      }
    }
  }

  for (Function *F : Functions) {
    if (F->hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // We look at internal functions only on-demand, but if any use is not a
    // direct call or is outside the current set of analyzed functions, we
    // have to do it eagerly.
    if (F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
            const auto *CB = dyn_cast<CallBase>(U.getUser());
            return CB && CB->isCallee(&U) &&
                   Functions.count(const_cast<Function *>(CB->getCaller()));
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(*F);
  }

  ChangeStatus Changed = A.run();

  LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
                    << " functions, result: " << Changed << ".\n");
  return Changed == ChangeStatus::CHANGED;
}

static bool runAttributorLightOnFunctions(InformationCache &InfoCache,
                                          SetVector<Function *> &Functions,
                                          AnalysisGetter &AG,
                                          CallGraphUpdater &CGUpdater,
                                          FunctionAnalysisManager &FAM,
                                          bool IsModulePass) {
  if (Functions.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "[AttributorLight] Run on module with " << Functions.size()
           << " functions:\n";
    for (Function *Fn : Functions)
      dbgs() << " - " << Fn->getName() << "\n";
  });

  // Create an Attributor and an initially empty information cache that is
  // filled while we identify default attribute opportunities.
  AttributorConfig AC(CGUpdater);
  AC.IsModulePass = IsModulePass;
  AC.DeleteFns = false;
  DenseSet<const char *> Allowed(
      {&AAWillReturn::ID, &AANoUnwind::ID, &AANoRecurse::ID, &AANoSync::ID,
       &AANoFree::ID, &AANoReturn::ID, &AAMemoryLocation::ID,
       &AAMemoryBehavior::ID, &AAUnderlyingObjects::ID, &AANoCapture::ID,
       &AAInterFnReachability::ID, &AAIntraFnReachability::ID, &AACallEdges::ID,
       &AANoFPClass::ID, &AAMustProgress::ID, &AANonNull::ID});
  AC.Allowed = &Allowed;
  AC.UseLiveness = false;

  Attributor A(Functions, InfoCache, AC);

  for (Function *F : Functions) {
    if (F->hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // We look at internal functions only on-demand, but if any use is not a
    // direct call or is outside the current set of analyzed functions, we
    // have to do it eagerly.
    if (AC.UseLiveness && F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
            const auto *CB = dyn_cast<CallBase>(U.getUser());
            return CB && CB->isCallee(&U) &&
                   Functions.count(const_cast<Function *>(CB->getCaller()));
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(*F);
  }

  ChangeStatus Changed = A.run();

  if (Changed == ChangeStatus::CHANGED) {
    // Invalidate analyses for modified functions so that we don't have to
    // invalidate all analyses for all functions in this SCC.
    PreservedAnalyses FuncPA;
    // We haven't changed the CFG for modified functions.
    FuncPA.preserveSet<CFGAnalyses>();
    for (Function *Changed : A.getModifiedFunctions()) {
      FAM.invalidate(*Changed, FuncPA);
      // Also invalidate any direct callers of changed functions since
      // analyses may care about attributes of direct callees. For example,
      // MemorySSA cares about whether or not a call's callee modifies memory
      // and queries that through function attributes.
      for (auto *U : Changed->users()) {
        if (auto *Call = dyn_cast<CallBase>(U)) {
          if (Call->getCalledFunction() == Changed)
            FAM.invalidate(*Call->getFunction(), FuncPA);
        }
      }
    }
  }
  LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
                    << " functions, result: " << Changed << ".\n");
  return Changed == ChangeStatus::CHANGED;
}

void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }

void AADepGraph::dumpGraph() {
  static std::atomic<int> CallTimes;
  std::string Prefix;

  if (!DepGraphDotFileNamePrefix.empty())
    Prefix = DepGraphDotFileNamePrefix;
  else
    Prefix = "dep_graph";
  std::string Filename =
      Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";

  outs() << "Dependency graph dump to " << Filename << ".\n";

  std::error_code EC;

  raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
  if (!EC)
    llvm::WriteGraph(File, this);

  CallTimes++;
}

void AADepGraph::print() {
  for (auto DepAA : SyntheticRoot.Deps)
    cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
}

PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (Function &F : M)
    Functions.insert(&F);

  CallGraphUpdater CGUpdater;
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                               /* DeleteFns */ true, /* IsModulePass */ true)) {
    // FIXME: Think about passes we will preserve and add them here.
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}
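
// For reference (usage sketch; the pass names are assumed to match this
// file's pass registration): the module and CGSCC variants can be exercised
// in isolation through the new pass manager, e.g.
//
//   opt -passes=attributor -S in.ll
//   opt -passes=attributor-cgscc -S in.ll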

PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
                                           CGSCCAnalysisManager &AM,
                                           LazyCallGraph &CG,
                                           CGSCCUpdateResult &UR) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (LazyCallGraph::Node &N : C)
    Functions.insert(&N.getFunction());

  if (Functions.empty())
    return PreservedAnalyses::all();

  Module &M = *Functions.back()->getParent();
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                               /* DeleteFns */ false,
                               /* IsModulePass */ false)) {
    // FIXME: Think about passes we will preserve and add them here.
    PreservedAnalyses PA;
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    return PA;
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses AttributorLightPass::run(Module &M,
                                           ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  AnalysisGetter AG(FAM, /* CachedOnly */ true);

  SetVector<Function *> Functions;
  for (Function &F : M)
    Functions.insert(&F);

  CallGraphUpdater CGUpdater;
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
  if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
                                    /* IsModulePass */ true)) {
    PreservedAnalyses PA;
    // We have not added or removed functions.
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    // We already invalidated all relevant function analyses above.
    PA.preserveSet<AllAnalysesOn<Function>>();
    return PA;
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses AttributorLightCGSCCPass::run(LazyCallGraph::SCC &C,
                                                CGSCCAnalysisManager &AM,
                                                LazyCallGraph &CG,
                                                CGSCCUpdateResult &UR) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (LazyCallGraph::Node &N : C)
    Functions.insert(&N.getFunction());

  if (Functions.empty())
    return PreservedAnalyses::all();

  Module &M = *Functions.back()->getParent();
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
  if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
                                    /* IsModulePass */ false)) {
    PreservedAnalyses PA;
    // We have not added or removed functions.
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    // We already invalidated all relevant function analyses above.
    PA.preserveSet<AllAnalysesOn<Function>>();
    return PA;
  }
  return PreservedAnalyses::all();
}

namespace llvm {

template <> struct GraphTraits<AADepGraphNode *> {
  using NodeRef = AADepGraphNode *;
  using DepTy = PointerIntPair<AADepGraphNode *, 1>;
  using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;

  static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
  static NodeRef DepGetVal(const DepTy &DT) { return DT.getPointer(); }

  using ChildIteratorType =
      mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
  using ChildEdgeIteratorType = AADepGraphNode::DepSetTy::iterator;

  static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }

  static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
};

template <>
struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
  static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }

  using nodes_iterator =
      mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;

  static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }

  static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
};

template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getNodeLabel(const AADepGraphNode *Node,
                                  const AADepGraph *DG) {
    std::string AAString;
    raw_string_ostream O(AAString);
    Node->print(O);
    return AAString;
  }
};

} // end namespace llvm