xref: /freebsd/contrib/llvm-project/llvm/lib/Analysis/AliasAnalysis.cpp (revision 6e516c87b6d779911edde7481d8aef165b837a03)
1 //==- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation --==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the generic AliasAnalysis interface, which serves as the
10 // common interface for all clients and implementations of alias analysis.
11 //
12 // This file also implements the default version of the AliasAnalysis interface
13 // that is to be used when no other implementation is specified.  This does some
14 // simple tests that detect obvious cases: two different global pointers cannot
15 // alias, a global cannot alias a malloc, two different mallocs cannot alias,
16 // etc.
17 //
18 // This alias analysis implementation really isn't very good for anything, but
19 // it is very fast, and makes a nice clean default implementation.  Because it
20 // handles lots of little corner cases, other, more complex, alias analysis
21 // implementations may choose to rely on this pass to resolve these simple and
22 // easy cases.
23 //
24 //===----------------------------------------------------------------------===//
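// Editorial illustration (not part of this revision): a client of the
// aggregated interface implemented below typically builds MemoryLocations for
// the two accesses it cares about and queries the top-level AAResults object,
// e.g. (AA, LoadA and StoreB are assumed names):
//
//   const MemoryLocation LocA = MemoryLocation::get(LoadA);
//   const MemoryLocation LocB = MemoryLocation::get(StoreB);
//   if (AA.alias(LocA, LocB) == AliasResult::NoAlias) {
//     // Proven disjoint: the two accesses can safely be reordered.
//   }
//
// The chaining methods below answer such queries by delegating to every
// registered AA implementation in turn.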
25 
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Analysis/BasicAliasAnalysis.h"
29 #include "llvm/Analysis/CaptureTracking.h"
30 #include "llvm/Analysis/GlobalsModRef.h"
31 #include "llvm/Analysis/MemoryLocation.h"
32 #include "llvm/Analysis/ObjCARCAliasAnalysis.h"
33 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
34 #include "llvm/Analysis/ScopedNoAliasAA.h"
35 #include "llvm/Analysis/TargetLibraryInfo.h"
36 #include "llvm/Analysis/TypeBasedAliasAnalysis.h"
37 #include "llvm/Analysis/ValueTracking.h"
38 #include "llvm/IR/Argument.h"
39 #include "llvm/IR/Attributes.h"
40 #include "llvm/IR/BasicBlock.h"
41 #include "llvm/IR/Instruction.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/Type.h"
44 #include "llvm/IR/Value.h"
45 #include "llvm/InitializePasses.h"
46 #include "llvm/Pass.h"
47 #include "llvm/Support/AtomicOrdering.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CommandLine.h"
50 #include <algorithm>
51 #include <cassert>
52 #include <functional>
53 #include <iterator>
54 
55 #define DEBUG_TYPE "aa"
56 
57 using namespace llvm;
58 
59 STATISTIC(NumNoAlias,   "Number of NoAlias results");
60 STATISTIC(NumMayAlias,  "Number of MayAlias results");
61 STATISTIC(NumMustAlias, "Number of MustAlias results");
62 
63 namespace llvm {
64 /// Allow disabling BasicAA from the AA results. This is particularly useful
65 /// when testing to isolate a single AA implementation.
66 cl::opt<bool> DisableBasicAA("disable-basic-aa", cl::Hidden, cl::init(false));
67 } // namespace llvm
68 
69 #ifndef NDEBUG
70 /// Print a trace of alias analysis queries and their results.
71 static cl::opt<bool> EnableAATrace("aa-trace", cl::Hidden, cl::init(false));
72 #else
73 static const bool EnableAATrace = false;
74 #endif
75 
76 AAResults::AAResults(const TargetLibraryInfo &TLI) : TLI(TLI) {}
77 
78 AAResults::AAResults(AAResults &&Arg)
79     : TLI(Arg.TLI), AAs(std::move(Arg.AAs)), AADeps(std::move(Arg.AADeps)) {}
80 
81 AAResults::~AAResults() {}
82 
83 bool AAResults::invalidate(Function &F, const PreservedAnalyses &PA,
84                            FunctionAnalysisManager::Invalidator &Inv) {
85   // AAResults preserves the AAManager by default, due to the stateless nature
86   // of AliasAnalysis. There is no need to check whether it has been preserved
87   // explicitly. Check if any module dependency was invalidated and caused the
88   // AAManager to be invalidated. Invalidate ourselves in that case.
89   auto PAC = PA.getChecker<AAManager>();
90   if (!PAC.preservedWhenStateless())
91     return true;
92 
93   // Check if any of the function dependencies were invalidated, and invalidate
94   // ourselves in that case.
95   for (AnalysisKey *ID : AADeps)
96     if (Inv.invalidate(ID, F, PA))
97       return true;
98 
99   // Everything we depend on is still fine, so are we. Nothing to invalidate.
100   return false;
101 }
102 
103 //===----------------------------------------------------------------------===//
104 // Default chaining methods
105 //===----------------------------------------------------------------------===//
106 
107 AliasResult AAResults::alias(const MemoryLocation &LocA,
108                              const MemoryLocation &LocB) {
109   SimpleAAQueryInfo AAQIP(*this);
110   return alias(LocA, LocB, AAQIP, nullptr);
111 }
112 
113 AliasResult AAResults::alias(const MemoryLocation &LocA,
114                              const MemoryLocation &LocB, AAQueryInfo &AAQI,
115                              const Instruction *CtxI) {
116   AliasResult Result = AliasResult::MayAlias;
117 
118   if (EnableAATrace) {
119     for (unsigned I = 0; I < AAQI.Depth; ++I)
120       dbgs() << "  ";
121     dbgs() << "Start " << *LocA.Ptr << " @ " << LocA.Size << ", "
122            << *LocB.Ptr << " @ " << LocB.Size << "\n";
123   }
124 
125   AAQI.Depth++;
126   for (const auto &AA : AAs) {
127     Result = AA->alias(LocA, LocB, AAQI, CtxI);
128     if (Result != AliasResult::MayAlias)
129       break;
130   }
131   AAQI.Depth--;
132 
133   if (EnableAATrace) {
134     for (unsigned I = 0; I < AAQI.Depth; ++I)
135       dbgs() << "  ";
136     dbgs() << "End " << *LocA.Ptr << " @ " << LocA.Size << ", "
137            << *LocB.Ptr << " @ " << LocB.Size << " = " << Result << "\n";
138   }
139 
140   if (AAQI.Depth == 0) {
141     if (Result == AliasResult::NoAlias)
142       ++NumNoAlias;
143     else if (Result == AliasResult::MustAlias)
144       ++NumMustAlias;
145     else
146       ++NumMayAlias;
147   }
148   return Result;
149 }
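// Editorial note on the chaining above: the query stops at the first
// registered implementation that returns something more precise than
// MayAlias, and the NumNoAlias/NumMayAlias/NumMustAlias statistics are only
// updated once the outermost query unwinds (AAQI.Depth == 0), so recursive
// sub-queries issued by individual AA implementations are not double-counted.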
150 
151 ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc,
152                                         bool IgnoreLocals) {
153   SimpleAAQueryInfo AAQIP(*this);
154   return getModRefInfoMask(Loc, AAQIP, IgnoreLocals);
155 }
156 
157 ModRefInfo AAResults::getModRefInfoMask(const MemoryLocation &Loc,
158                                         AAQueryInfo &AAQI, bool IgnoreLocals) {
159   ModRefInfo Result = ModRefInfo::ModRef;
160 
161   for (const auto &AA : AAs) {
162     Result &= AA->getModRefInfoMask(Loc, AAQI, IgnoreLocals);
163 
164     // Early-exit the moment we reach the bottom of the lattice.
165     if (isNoModRef(Result))
166       return ModRefInfo::NoModRef;
167   }
168 
169   return Result;
170 }
171 
172 ModRefInfo AAResults::getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
173   ModRefInfo Result = ModRefInfo::ModRef;
174 
175   for (const auto &AA : AAs) {
176     Result &= AA->getArgModRefInfo(Call, ArgIdx);
177 
178     // Early-exit the moment we reach the bottom of the lattice.
179     if (isNoModRef(Result))
180       return ModRefInfo::NoModRef;
181   }
182 
183   return Result;
184 }
185 
186 ModRefInfo AAResults::getModRefInfo(const Instruction *I,
187                                     const CallBase *Call2) {
188   SimpleAAQueryInfo AAQIP(*this);
189   return getModRefInfo(I, Call2, AAQIP);
190 }
191 
192 ModRefInfo AAResults::getModRefInfo(const Instruction *I, const CallBase *Call2,
193                                     AAQueryInfo &AAQI) {
194   // We may have two calls.
195   if (const auto *Call1 = dyn_cast<CallBase>(I)) {
196     // Check if the two calls modify the same memory.
197     return getModRefInfo(Call1, Call2, AAQI);
198   }
199   // If this is a fence, just return ModRef.
200   if (I->isFenceLike())
201     return ModRefInfo::ModRef;
202   // Otherwise, check if the call modifies or references the
203   // location this memory access defines.  The best we can say
204   // is that if the call references what this instruction
205   // defines, it must be clobbered by this location.
206   const MemoryLocation DefLoc = MemoryLocation::get(I);
207   ModRefInfo MR = getModRefInfo(Call2, DefLoc, AAQI);
208   if (isModOrRefSet(MR))
209     return ModRefInfo::ModRef;
210   return ModRefInfo::NoModRef;
211 }
212 
213 ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
214                                     const MemoryLocation &Loc,
215                                     AAQueryInfo &AAQI) {
216   ModRefInfo Result = ModRefInfo::ModRef;
217 
218   for (const auto &AA : AAs) {
219     Result &= AA->getModRefInfo(Call, Loc, AAQI);
220 
221     // Early-exit the moment we reach the bottom of the lattice.
222     if (isNoModRef(Result))
223       return ModRefInfo::NoModRef;
224   }
225 
226   // Try to refine the mod-ref info further using other API entry points to the
227   // aggregate set of AA results.
228 
229   // We can completely ignore inaccessible memory here, because MemoryLocations
230   // can only reference accessible memory.
231   auto ME = getMemoryEffects(Call, AAQI)
232                 .getWithoutLoc(IRMemLocation::InaccessibleMem);
233   if (ME.doesNotAccessMemory())
234     return ModRefInfo::NoModRef;
235 
236   ModRefInfo ArgMR = ME.getModRef(IRMemLocation::ArgMem);
237   ModRefInfo OtherMR = ME.getWithoutLoc(IRMemLocation::ArgMem).getModRef();
238   if ((ArgMR | OtherMR) != OtherMR) {
239     // Refine the modref info for argument memory. We only bother to do this
240     // if ArgMR is not a subset of OtherMR, otherwise this won't have an impact
241     // on the final result.
242     ModRefInfo AllArgsMask = ModRefInfo::NoModRef;
243     for (const auto &I : llvm::enumerate(Call->args())) {
244       const Value *Arg = I.value();
245       if (!Arg->getType()->isPointerTy())
246         continue;
247       unsigned ArgIdx = I.index();
248       MemoryLocation ArgLoc = MemoryLocation::getForArgument(Call, ArgIdx, TLI);
249       AliasResult ArgAlias = alias(ArgLoc, Loc, AAQI, Call);
250       if (ArgAlias != AliasResult::NoAlias)
251         AllArgsMask |= getArgModRefInfo(Call, ArgIdx);
252     }
253     ArgMR &= AllArgsMask;
254   }
255 
256   Result &= ArgMR | OtherMR;
257 
258   // Apply the ModRef mask. This ensures that if Loc is a constant memory
259   // location, we take into account the fact that the call definitely could not
260   // modify the memory location.
261   if (!isNoModRef(Result))
262     Result &= getModRefInfoMask(Loc);
263 
264   return Result;
265 }
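// Worked example (editorial sketch, assumed names): for a call whose effects
// are memory(argmem: readwrite), OtherMR above is NoModRef, so the walk over
// the pointer arguments decides the result. If no argument location may alias
// Loc, AllArgsMask stays NoModRef and a query through the public two-argument
// entry point resolves to NoModRef:
//
//   ModRefInfo MR = AA.getModRefInfo(Call, Loc);
//   if (!isModOrRefSet(MR)) {
//     // The call neither reads nor writes Loc.
//   }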
266 
267 ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
268                                     const CallBase *Call2, AAQueryInfo &AAQI) {
269   ModRefInfo Result = ModRefInfo::ModRef;
270 
271   for (const auto &AA : AAs) {
272     Result &= AA->getModRefInfo(Call1, Call2, AAQI);
273 
274     // Early-exit the moment we reach the bottom of the lattice.
275     if (isNoModRef(Result))
276       return ModRefInfo::NoModRef;
277   }
278 
279   // Try to refine the mod-ref info further using other API entry points to the
280   // aggregate set of AA results.
281 
282   // If Call1 or Call2 are readnone, they don't interact.
283   auto Call1B = getMemoryEffects(Call1, AAQI);
284   if (Call1B.doesNotAccessMemory())
285     return ModRefInfo::NoModRef;
286 
287   auto Call2B = getMemoryEffects(Call2, AAQI);
288   if (Call2B.doesNotAccessMemory())
289     return ModRefInfo::NoModRef;
290 
291   // If they both only read from memory, there is no dependence.
292   if (Call1B.onlyReadsMemory() && Call2B.onlyReadsMemory())
293     return ModRefInfo::NoModRef;
294 
295   // If Call1 only reads memory, the only dependence on Call2 can be
296   // from Call1 reading memory written by Call2.
297   if (Call1B.onlyReadsMemory())
298     Result &= ModRefInfo::Ref;
299   else if (Call1B.onlyWritesMemory())
300     Result &= ModRefInfo::Mod;
301 
302   // If Call2 only access memory through arguments, accumulate the mod/ref
303   // information from Call1's references to the memory referenced by
304   // Call2's arguments.
305   if (Call2B.onlyAccessesArgPointees()) {
306     if (!Call2B.doesAccessArgPointees())
307       return ModRefInfo::NoModRef;
308     ModRefInfo R = ModRefInfo::NoModRef;
309     for (auto I = Call2->arg_begin(), E = Call2->arg_end(); I != E; ++I) {
310       const Value *Arg = *I;
311       if (!Arg->getType()->isPointerTy())
312         continue;
313       unsigned Call2ArgIdx = std::distance(Call2->arg_begin(), I);
314       auto Call2ArgLoc =
315           MemoryLocation::getForArgument(Call2, Call2ArgIdx, TLI);
316 
317       // ArgModRefC2 indicates what Call2 might do to Call2ArgLoc, and the
318       // dependence of Call1 on that location is the inverse:
319       // - If Call2 modifies location, dependence exists if Call1 reads or
320       //   writes.
321       // - If Call2 only reads location, dependence exists if Call1 writes.
322       ModRefInfo ArgModRefC2 = getArgModRefInfo(Call2, Call2ArgIdx);
323       ModRefInfo ArgMask = ModRefInfo::NoModRef;
324       if (isModSet(ArgModRefC2))
325         ArgMask = ModRefInfo::ModRef;
326       else if (isRefSet(ArgModRefC2))
327         ArgMask = ModRefInfo::Mod;
328 
329       // ModRefC1 indicates what Call1 might do to Call2ArgLoc, and we use
330       // the ArgMask above to update the dependence info.
331       ArgMask &= getModRefInfo(Call1, Call2ArgLoc, AAQI);
332 
333       R = (R | ArgMask) & Result;
334       if (R == Result)
335         break;
336     }
337 
338     return R;
339   }
340 
341   // If Call1 only accesses memory through arguments, check if Call2 references
342   // any of the memory referenced by Call1's arguments. If not, return NoModRef.
343   if (Call1B.onlyAccessesArgPointees()) {
344     if (!Call1B.doesAccessArgPointees())
345       return ModRefInfo::NoModRef;
346     ModRefInfo R = ModRefInfo::NoModRef;
347     for (auto I = Call1->arg_begin(), E = Call1->arg_end(); I != E; ++I) {
348       const Value *Arg = *I;
349       if (!Arg->getType()->isPointerTy())
350         continue;
351       unsigned Call1ArgIdx = std::distance(Call1->arg_begin(), I);
352       auto Call1ArgLoc =
353           MemoryLocation::getForArgument(Call1, Call1ArgIdx, TLI);
354 
355       // ArgModRefC1 indicates what Call1 might do to Call1ArgLoc; if Call1
356       // might Mod Call1ArgLoc, then we care about either a Mod or a Ref by
357       // Call2. If Call1 might Ref, then we care only about a Mod by Call2.
358       ModRefInfo ArgModRefC1 = getArgModRefInfo(Call1, Call1ArgIdx);
359       ModRefInfo ModRefC2 = getModRefInfo(Call2, Call1ArgLoc, AAQI);
360       if ((isModSet(ArgModRefC1) && isModOrRefSet(ModRefC2)) ||
361           (isRefSet(ArgModRefC1) && isModSet(ModRefC2)))
362         R = (R | ArgModRefC1) & Result;
363 
364       if (R == Result)
365         break;
366     }
367 
368     return R;
369   }
370 
371   return Result;
372 }
373 
374 MemoryEffects AAResults::getMemoryEffects(const CallBase *Call,
375                                           AAQueryInfo &AAQI) {
376   MemoryEffects Result = MemoryEffects::unknown();
377 
378   for (const auto &AA : AAs) {
379     Result &= AA->getMemoryEffects(Call, AAQI);
380 
381     // Early-exit the moment we reach the bottom of the lattice.
382     if (Result.doesNotAccessMemory())
383       return Result;
384   }
385 
386   return Result;
387 }
388 
389 MemoryEffects AAResults::getMemoryEffects(const CallBase *Call) {
390   SimpleAAQueryInfo AAQI(*this);
391   return getMemoryEffects(Call, AAQI);
392 }
393 
394 MemoryEffects AAResults::getMemoryEffects(const Function *F) {
395   MemoryEffects Result = MemoryEffects::unknown();
396 
397   for (const auto &AA : AAs) {
398     Result &= AA->getMemoryEffects(F);
399 
400     // Early-exit the moment we reach the bottom of the lattice.
401     if (Result.doesNotAccessMemory())
402       return Result;
403   }
404 
405   return Result;
406 }
407 
408 raw_ostream &llvm::operator<<(raw_ostream &OS, AliasResult AR) {
409   switch (AR) {
410   case AliasResult::NoAlias:
411     OS << "NoAlias";
412     break;
413   case AliasResult::MustAlias:
414     OS << "MustAlias";
415     break;
416   case AliasResult::MayAlias:
417     OS << "MayAlias";
418     break;
419   case AliasResult::PartialAlias:
420     OS << "PartialAlias";
421     if (AR.hasOffset())
422       OS << " (off " << AR.getOffset() << ")";
423     break;
424   }
425   return OS;
426 }
427 
428 raw_ostream &llvm::operator<<(raw_ostream &OS, ModRefInfo MR) {
429   switch (MR) {
430   case ModRefInfo::NoModRef:
431     OS << "NoModRef";
432     break;
433   case ModRefInfo::Ref:
434     OS << "Ref";
435     break;
436   case ModRefInfo::Mod:
437     OS << "Mod";
438     break;
439   case ModRefInfo::ModRef:
440     OS << "ModRef";
441     break;
442   }
443   return OS;
444 }
445 
446 raw_ostream &llvm::operator<<(raw_ostream &OS, MemoryEffects ME) {
447   for (IRMemLocation Loc : MemoryEffects::locations()) {
448     switch (Loc) {
449     case IRMemLocation::ArgMem:
450       OS << "ArgMem: ";
451       break;
452     case IRMemLocation::InaccessibleMem:
453       OS << "InaccessibleMem: ";
454       break;
455     case IRMemLocation::Other:
456       OS << "Other: ";
457       break;
458     }
459     OS << ME.getModRef(Loc) << ", ";
460   }
461   return OS;
462 }
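// Editorial note: given the printers above, an argument-only read effect
// (e.g. MemoryEffects::argMemOnly(ModRefInfo::Ref), an assumed example)
// would render roughly as
//
//   ArgMem: Ref, InaccessibleMem: NoModRef, Other: NoModRef, 
//
// i.e. one "<location>: <mod-ref>, " pair per IRMemLocation, trailing
// separator included.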
463 
464 //===----------------------------------------------------------------------===//
465 // Helper method implementation
466 //===----------------------------------------------------------------------===//
467 
468 ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
469                                     const MemoryLocation &Loc,
470                                     AAQueryInfo &AAQI) {
471   // Be conservative in the face of atomic.
472   if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
473     return ModRefInfo::ModRef;
474 
475   // If the load address doesn't alias the given address, it doesn't read
476   // or write the specified memory.
477   if (Loc.Ptr) {
478     AliasResult AR = alias(MemoryLocation::get(L), Loc, AAQI, L);
479     if (AR == AliasResult::NoAlias)
480       return ModRefInfo::NoModRef;
481   }
482   // Otherwise, a load just reads.
483   return ModRefInfo::Ref;
484 }
485 
486 ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
487                                     const MemoryLocation &Loc,
488                                     AAQueryInfo &AAQI) {
489   // Be conservative in the face of atomic.
490   if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
491     return ModRefInfo::ModRef;
492 
493   if (Loc.Ptr) {
494     AliasResult AR = alias(MemoryLocation::get(S), Loc, AAQI, S);
495     // If the store address cannot alias the pointer in question, then the
496     // specified memory cannot be modified by the store.
497     if (AR == AliasResult::NoAlias)
498       return ModRefInfo::NoModRef;
499 
500     // Examine the ModRef mask. If Mod isn't present, then return NoModRef.
501     // This ensures that if Loc is a constant memory location, we take into
502     // account the fact that the store definitely could not modify the memory
503     // location.
504     if (!isModSet(getModRefInfoMask(Loc)))
505       return ModRefInfo::NoModRef;
506   }
507 
508   // Otherwise, a store just writes.
509   return ModRefInfo::Mod;
510 }
511 
512 ModRefInfo AAResults::getModRefInfo(const FenceInst *S,
513                                     const MemoryLocation &Loc,
514                                     AAQueryInfo &AAQI) {
515   // All we know about a fence instruction is what we get from the ModRef
516   // mask: if Loc is a constant memory location, the fence definitely could
517   // not modify it.
518   if (Loc.Ptr)
519     return getModRefInfoMask(Loc);
520   return ModRefInfo::ModRef;
521 }
522 
523 ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
524                                     const MemoryLocation &Loc,
525                                     AAQueryInfo &AAQI) {
526   if (Loc.Ptr) {
527     AliasResult AR = alias(MemoryLocation::get(V), Loc, AAQI, V);
528     // If the va_arg address cannot alias the pointer in question, then the
529     // specified memory cannot be accessed by the va_arg.
530     if (AR == AliasResult::NoAlias)
531       return ModRefInfo::NoModRef;
532 
533     // If the pointer is a pointer to invariant memory, then it could not have
534     // been modified by this va_arg.
535     return getModRefInfoMask(Loc, AAQI);
536   }
537 
538   // Otherwise, a va_arg reads and writes.
539   return ModRefInfo::ModRef;
540 }
541 
542 ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
543                                     const MemoryLocation &Loc,
544                                     AAQueryInfo &AAQI) {
545   if (Loc.Ptr) {
546     // If the pointer is a pointer to invariant memory,
547     // then it could not have been modified by this catchpad.
548     return getModRefInfoMask(Loc, AAQI);
549   }
550 
551   // Otherwise, a catchpad reads and writes.
552   return ModRefInfo::ModRef;
553 }
554 
555 ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
556                                     const MemoryLocation &Loc,
557                                     AAQueryInfo &AAQI) {
558   if (Loc.Ptr) {
559     // If the pointer is a pointer to invariant memory,
560     // then it could not have been modified by this catchret.
561     return getModRefInfoMask(Loc, AAQI);
562   }
563 
564   // Otherwise, a catchret reads and writes.
565   return ModRefInfo::ModRef;
566 }
567 
568 ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
569                                     const MemoryLocation &Loc,
570                                     AAQueryInfo &AAQI) {
571   // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
572   if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
573     return ModRefInfo::ModRef;
574 
575   if (Loc.Ptr) {
576     AliasResult AR = alias(MemoryLocation::get(CX), Loc, AAQI, CX);
577     // If the cmpxchg address does not alias the location, it does not access
578     // it.
579     if (AR == AliasResult::NoAlias)
580       return ModRefInfo::NoModRef;
581   }
582 
583   return ModRefInfo::ModRef;
584 }
585 
586 ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
587                                     const MemoryLocation &Loc,
588                                     AAQueryInfo &AAQI) {
589   // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
590   if (isStrongerThanMonotonic(RMW->getOrdering()))
591     return ModRefInfo::ModRef;
592 
593   if (Loc.Ptr) {
594     AliasResult AR = alias(MemoryLocation::get(RMW), Loc, AAQI, RMW);
595     // If the atomicrmw address does not alias the location, it does not access
596     // it.
597     if (AR == AliasResult::NoAlias)
598       return ModRefInfo::NoModRef;
599   }
600 
601   return ModRefInfo::ModRef;
602 }
603 
604 ModRefInfo AAResults::getModRefInfo(const Instruction *I,
605                                     const std::optional<MemoryLocation> &OptLoc,
606                                     AAQueryInfo &AAQIP) {
607   if (OptLoc == std::nullopt) {
608     if (const auto *Call = dyn_cast<CallBase>(I))
609       return getMemoryEffects(Call, AAQIP).getModRef();
610   }
611 
612   const MemoryLocation &Loc = OptLoc.value_or(MemoryLocation());
613 
614   switch (I->getOpcode()) {
615   case Instruction::VAArg:
616     return getModRefInfo((const VAArgInst *)I, Loc, AAQIP);
617   case Instruction::Load:
618     return getModRefInfo((const LoadInst *)I, Loc, AAQIP);
619   case Instruction::Store:
620     return getModRefInfo((const StoreInst *)I, Loc, AAQIP);
621   case Instruction::Fence:
622     return getModRefInfo((const FenceInst *)I, Loc, AAQIP);
623   case Instruction::AtomicCmpXchg:
624     return getModRefInfo((const AtomicCmpXchgInst *)I, Loc, AAQIP);
625   case Instruction::AtomicRMW:
626     return getModRefInfo((const AtomicRMWInst *)I, Loc, AAQIP);
627   case Instruction::Call:
628   case Instruction::CallBr:
629   case Instruction::Invoke:
630     return getModRefInfo((const CallBase *)I, Loc, AAQIP);
631   case Instruction::CatchPad:
632     return getModRefInfo((const CatchPadInst *)I, Loc, AAQIP);
633   case Instruction::CatchRet:
634     return getModRefInfo((const CatchReturnInst *)I, Loc, AAQIP);
635   default:
636     assert(!I->mayReadOrWriteMemory() &&
637            "Unhandled memory access instruction!");
638     return ModRefInfo::NoModRef;
639   }
640 }
641 
642 /// Return information about whether a particular call site modifies
643 /// or reads the specified memory location \p MemLoc before instruction \p I
644 /// in a BasicBlock.
645 /// FIXME: this is really just shoring-up a deficiency in alias analysis.
646 /// BasicAA isn't willing to spend linear time determining whether an alloca
647 /// was captured before or after this particular call, while we are. However,
648 /// with a smarter AA in place, this test is just wasting compile time.
649 ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
650                                          const MemoryLocation &MemLoc,
651                                          DominatorTree *DT,
652                                          AAQueryInfo &AAQI) {
653   if (!DT)
654     return ModRefInfo::ModRef;
655 
656   const Value *Object = getUnderlyingObject(MemLoc.Ptr);
657   if (!isIdentifiedFunctionLocal(Object))
658     return ModRefInfo::ModRef;
659 
660   const auto *Call = dyn_cast<CallBase>(I);
661   if (!Call || Call == Object)
662     return ModRefInfo::ModRef;
663 
664   if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
665                                  /* StoreCaptures */ true, I, DT,
666                                  /* include Object */ true))
667     return ModRefInfo::ModRef;
668 
669   unsigned ArgNo = 0;
670   ModRefInfo R = ModRefInfo::NoModRef;
671   // Set flag only if no May found and all operands processed.
672   for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
673        CI != CE; ++CI, ++ArgNo) {
674     // Only look at the no-capture or byval pointer arguments.  If this
675     // pointer were passed to arguments that were neither of these, then it
676     // couldn't be no-capture.
677     if (!(*CI)->getType()->isPointerTy() ||
678         (!Call->doesNotCapture(ArgNo) && ArgNo < Call->arg_size() &&
679          !Call->isByValArgument(ArgNo)))
680       continue;
681 
682     AliasResult AR =
683         alias(MemoryLocation::getBeforeOrAfter(*CI),
684               MemoryLocation::getBeforeOrAfter(Object), AAQI, Call);
685     // If this is a no-capture pointer argument, see if we can tell that it
686     // is impossible to alias the pointer we're checking.  If not, we have to
687     // assume that the call could touch the pointer, even though it doesn't
688     // escape.
689     if (AR == AliasResult::NoAlias)
690       continue;
691     if (Call->doesNotAccessMemory(ArgNo))
692       continue;
693     if (Call->onlyReadsMemory(ArgNo)) {
694       R = ModRefInfo::Ref;
695       continue;
696     }
697     return ModRefInfo::ModRef;
698   }
699   return R;
700 }
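// Editorial sketch of the refinement above (assumed IR): when the queried
// location is an alloca that is not captured before the call, and the only
// pointer argument it may alias is readonly, the call is reported as Ref
// rather than the conservative ModRef:
//
//   %a = alloca i32
//   call void @f(ptr nocapture readonly %a)   ; callCapturesBefore -> Ref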
701 
702 /// canBasicBlockModify - Return true if it is possible for execution of the
703 /// specified basic block to modify the location Loc.
704 ///
705 bool AAResults::canBasicBlockModify(const BasicBlock &BB,
706                                     const MemoryLocation &Loc) {
707   return canInstructionRangeModRef(BB.front(), BB.back(), Loc, ModRefInfo::Mod);
708 }
709 
710 /// canInstructionRangeModRef - Return true if it is possible for the
711 /// execution of the specified instructions to mod\ref (according to the
712 /// mode) the location Loc. The instructions to consider are all
713 /// of the instructions in the range of [I1,I2] INCLUSIVE.
714 /// I1 and I2 must be in the same basic block.
715 bool AAResults::canInstructionRangeModRef(const Instruction &I1,
716                                           const Instruction &I2,
717                                           const MemoryLocation &Loc,
718                                           const ModRefInfo Mode) {
719   assert(I1.getParent() == I2.getParent() &&
720          "Instructions not in same basic block!");
721   BasicBlock::const_iterator I = I1.getIterator();
722   BasicBlock::const_iterator E = I2.getIterator();
723   ++E;  // Convert from inclusive to exclusive range.
724 
725   for (; I != E; ++I) // Check every instruction in range
726     if (isModOrRefSet(getModRefInfo(&*I, Loc) & Mode))
727       return true;
728   return false;
729 }
730 
731 // Provide a definition for the root virtual destructor.
732 AAResults::Concept::~Concept() = default;
733 
734 // Provide a definition for the static object used to identify passes.
735 AnalysisKey AAManager::Key;
736 
737 ExternalAAWrapperPass::ExternalAAWrapperPass() : ImmutablePass(ID) {
738   initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
739 }
740 
741 ExternalAAWrapperPass::ExternalAAWrapperPass(CallbackT CB)
742     : ImmutablePass(ID), CB(std::move(CB)) {
743   initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
744 }
745 
746 char ExternalAAWrapperPass::ID = 0;
747 
748 INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
749                 false, true)
750 
751 ImmutablePass *
752 llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
753   return new ExternalAAWrapperPass(std::move(Callback));
754 }
755 
756 AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {
757   initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
758 }
759 
760 char AAResultsWrapperPass::ID = 0;
761 
762 INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
763                       "Function Alias Analysis Results", false, true)
764 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
765 INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
766 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
767 INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
768 INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
769 INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
770 INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
771                     "Function Alias Analysis Results", false, true)
772 
773 /// Run the wrapper pass to rebuild an aggregation over known AA passes.
774 ///
775 /// This is the legacy pass manager's interface to the new-style AA results
776 /// aggregation object. Because this is somewhat shoe-horned into the legacy
777 /// pass manager, we hard code all the specific alias analyses available into
778 /// it. While the particular set enabled is configured via commandline flags,
779 /// adding a new alias analysis to LLVM will require adding support for it to
780 /// this list.
781 bool AAResultsWrapperPass::runOnFunction(Function &F) {
782   // NB! This *must* be reset before adding new AA results to the new
783   // AAResults object because in the legacy pass manager, each instance
784   // of these will refer to the *same* immutable analyses, registering and
785   // unregistering themselves with them. We need to carefully tear down the
786   // previous object first, in this case replacing it with an empty one, before
787   // registering new results.
788   AAR.reset(
789       new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F)));
790 
791   // BasicAA is always available for function analyses. Also, we add it first
792   // so that it can trump TBAA results when it proves MustAlias.
793   // FIXME: TBAA should have an explicit mode to support this and then we
794   // should reconsider the ordering here.
795   if (!DisableBasicAA)
796     AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult());
797 
798   // Populate the results with the currently available AAs.
799   if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
800     AAR->addAAResult(WrapperPass->getResult());
801   if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
802     AAR->addAAResult(WrapperPass->getResult());
803   if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>())
804     AAR->addAAResult(WrapperPass->getResult());
805   if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>())
806     AAR->addAAResult(WrapperPass->getResult());
807 
808   // If available, run an external AA providing callback over the results as
809   // well.
810   if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>())
811     if (WrapperPass->CB)
812       WrapperPass->CB(*this, F, *AAR);
813 
814   // Analyses don't mutate the IR, so return false.
815   return false;
816 }
817 
818 void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
819   AU.setPreservesAll();
820   AU.addRequiredTransitive<BasicAAWrapperPass>();
821   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
822 
823   // We also need to mark all the alias analysis passes we will potentially
824   // probe in runOnFunction as used here to ensure the legacy pass manager
825   // preserves them. This hard coding of lists of alias analyses is specific to
826   // the legacy pass manager.
827   AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
828   AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
829   AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
830   AU.addUsedIfAvailable<SCEVAAWrapperPass>();
831   AU.addUsedIfAvailable<ExternalAAWrapperPass>();
832 }
833 
834 AAManager::Result AAManager::run(Function &F, FunctionAnalysisManager &AM) {
835   Result R(AM.getResult<TargetLibraryAnalysis>(F));
836   for (auto &Getter : ResultGetters)
837     (*Getter)(F, AM, R);
838   return R;
839 }
840 
841 bool llvm::isNoAliasCall(const Value *V) {
842   if (const auto *Call = dyn_cast<CallBase>(V))
843     return Call->hasRetAttr(Attribute::NoAlias);
844   return false;
845 }
846 
847 static bool isNoAliasOrByValArgument(const Value *V) {
848   if (const Argument *A = dyn_cast<Argument>(V))
849     return A->hasNoAliasAttr() || A->hasByValAttr();
850   return false;
851 }
852 
853 bool llvm::isIdentifiedObject(const Value *V) {
854   if (isa<AllocaInst>(V))
855     return true;
856   if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
857     return true;
858   if (isNoAliasCall(V))
859     return true;
860   if (isNoAliasOrByValArgument(V))
861     return true;
862   return false;
863 }
864 
865 bool llvm::isIdentifiedFunctionLocal(const Value *V) {
866   return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasOrByValArgument(V);
867 }
868 
869 bool llvm::isEscapeSource(const Value *V) {
870   if (auto *CB = dyn_cast<CallBase>(V))
871     return !isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CB,
872                                                                         true);
873 
874   // The load case works because isNonEscapingLocalObject considers all
875   // stores to be escapes (it passes true for the StoreCaptures argument
876   // to PointerMayBeCaptured).
877   if (isa<LoadInst>(V))
878     return true;
879 
880   // The inttoptr case works because isNonEscapingLocalObject considers all
881   // means of converting or equating a pointer to an int (ptrtoint, ptr store
882   // which could be followed by an integer load, ptr<->int compare) as
883   // escaping, and objects located at well-known addresses via platform-specific
884   // means cannot be considered non-escaping local objects.
885   if (isa<IntToPtrInst>(V))
886     return true;
887 
888   // Same for inttoptr constant expressions.
889   if (auto *CE = dyn_cast<ConstantExpr>(V))
890     if (CE->getOpcode() == Instruction::IntToPtr)
891       return true;
892 
893   return false;
894 }
895 
896 bool llvm::isNotVisibleOnUnwind(const Value *Object,
897                                 bool &RequiresNoCaptureBeforeUnwind) {
898   RequiresNoCaptureBeforeUnwind = false;
899 
900   // Alloca goes out of scope on unwind.
901   if (isa<AllocaInst>(Object))
902     return true;
903 
904   // Byval goes out of scope on unwind.
905   if (auto *A = dyn_cast<Argument>(Object))
906     return A->hasByValAttr() || A->hasAttribute(Attribute::DeadOnUnwind);
907 
908   // A noalias return is not accessible from any other code. If the pointer
909   // does not escape prior to the unwind, then the caller cannot access the
910   // memory either.
911   if (isNoAliasCall(Object)) {
912     RequiresNoCaptureBeforeUnwind = true;
913     return true;
914   }
915 
916   return false;
917 }
918 
919 // We don't consider globals as writable: While the physical memory is writable,
920 // we may not have provenance to perform the write.
921 bool llvm::isWritableObject(const Value *Object,
922                             bool &ExplicitlyDereferenceableOnly) {
923   ExplicitlyDereferenceableOnly = false;
924 
925   // TODO: Alloca might not be writable after its lifetime ends.
926   // See https://github.com/llvm/llvm-project/issues/51838.
927   if (isa<AllocaInst>(Object))
928     return true;
929 
930   if (auto *A = dyn_cast<Argument>(Object)) {
931     if (A->hasAttribute(Attribute::Writable)) {
932       ExplicitlyDereferenceableOnly = true;
933       return true;
934     }
935 
936     return A->hasByValAttr();
937   }
938 
939   // TODO: Noalias shouldn't imply writability, this should check for an
940   // allocator function instead.
941   return isNoAliasCall(Object);
942 }
943