//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

static cl::opt<unsigned>
    MaxInterferingWrites("attributor-max-interfering-writes", cl::Hidden,
                         cl::desc("Maximum number of interfering writes to "
                                  "check before assuming all might interfere."),
                         cl::init(6));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
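// For example (illustrative only), one declaration followed by two increment
// sites could look like:
//  STATS_DECL(nonnull, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, nonnull))
//  ...
//  STATS_TRACK(nonnull, Arguments)
//  ...
//  STATS_TRACK(nonnull, Arguments)
//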
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
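///
/// For example (illustrative), for a \p Ptr of type `{ i32, [4 x i8] }*` and
/// an \p Offset of 5, the helper emits a GEP with the indices [0, 1, 1], i.e.,
/// it steps over the leading i32 and selects byte 1 of the array.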
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
                                                Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
/// If \p Intraprocedural is set to true, only values valid in the scope of
/// \p CtxI will be visited and simplification into other scopes is prevented.
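///
/// For example (illustrative), given
///   %sel = select i1 %c, i32 %a, i32 42
/// with a condition that cannot be simplified to a constant, the traversal
/// queues both %a and 42 and eventually hands each of them to \p VisitValueCB.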
template <typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
    StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool &UsedAssumedInformation,
    bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr,
    bool Intraprocedural = false) {

  struct LivenessInfo {
    const AAIsDead *LivenessAA = nullptr;
    bool AnyDead = false;
  };
  DenseMap<const Function *, LivenessInfo> LivenessAAs;
  auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
    LivenessInfo &LI = LivenessAAs[&F];
    if (!LI.LivenessAA)
      LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F),
                                            DepClassTy::NONE);
    return LI;
  };

  Value *InitialV = &IRP.getAssociatedValue();
  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({InitialV, CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues) {
      LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
                        << Iteration << "!\n");
      return false;
    }

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit assumed potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Optional<Constant *> C = A.getAssumedConstant(
          *SI->getCondition(), QueryingAA, UsedAssumedInformation);
      bool NoValueYet = !C.hasValue();
      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
        continue;
      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
        if (CI->isZero())
          Worklist.push_back({SI->getFalseValue(), CtxI});
        else
          Worklist.push_back({SI->getTrueValue(), CtxI});
        continue;
      }
      // We could not simplify the condition, assume both values.
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
          LI.AnyDead = true;
          UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (auto *Arg = dyn_cast<Argument>(V)) {
      if (!Intraprocedural && !Arg->hasPassPointeeByValueCopyAttr()) {
        SmallVector<Item> CallSiteValues;
        bool UsedAssumedInformation = false;
        if (A.checkForAllCallSites(
                [&](AbstractCallSite ACS) {
                  // Callbacks might not have a corresponding call site operand,
                  // stick with the argument in that case.
                  Value *CSOp = ACS.getCallArgOperand(*Arg);
                  if (!CSOp)
                    return false;
                  CallSiteValues.push_back({CSOp, ACS.getInstruction()});
                  return true;
                },
                *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
          Worklist.append(CallSiteValues);
          continue;
        }
      }
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      Optional<Value *> SimpleV =
          A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
      if (!SimpleV.hasValue())
        continue;
      Value *NewV = SimpleV.getValue();
      if (NewV && NewV != V) {
        if (!Intraprocedural || !CtxI ||
            AA::isValidInScope(*NewV, CtxI->getFunction())) {
          Worklist.push_back({NewV, CtxI});
          continue;
        }
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
      LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
                        << *V << "!\n");
      return false;
    }
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  for (auto &It : LivenessAAs)
    if (It.second.AnyDead)
      A.recordDependence(*It.second.LivenessAA, QueryingAA,
                         DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallVectorImpl<Value *> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI,
                                     bool &UsedAssumedInformation,
                                     bool Intraprocedural) {
  auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
  SmallPtrSet<Value *, 8> SeenObjects;
  auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
                                     SmallVectorImpl<Value *> &Objects,
                                     bool) -> bool {
    if (SeenObjects.insert(&Val).second)
      Objects.push_back(&Val);
    return true;
  };
  if (!genericValueTraversal<decltype(Objects)>(
          A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
          UsedAssumedInformation, true, 32, StripCB, Intraprocedural))
    return false;
  return true;
}

const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ false,
                                                AttributorAnalysis);
}

static const Value *
getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
                        const Value *Ptr, int64_t &BytesOffset,
                        const DataLayout &DL, bool AllowNonInbounds = false) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
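///
/// E.g. (illustrative), when \p AAType is AANonNull, the returned position
/// keeps the nonnull state only if every possibly returned value is nonnull;
/// a single unknown return value drops the information.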
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              UsedAssumedInformation))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

611   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
612                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
613                     << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};
} // namespace

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - Underlying use.
/// I - The user of \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can then be merged into the overall state. Let ParentState_i
  // be the state to indicate the known information for the i-th branch
  // instruction in the context. ChildStates are created for its successors
  // respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  //      ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //    if (a)
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    else {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent is a conjunction of the children's known
    // states, so it is initialized with the best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

/// An access kind description as used by AAPointerInfo.
struct OffsetAndSize;

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AA::PointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// Helper to represent an access offset and size, with logic to deal with
/// uncertainty and check for overlapping accesses.
struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
  using BaseTy = std::pair<int64_t, int64_t>;
  OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
  OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
  int64_t getOffset() const { return first; }
  int64_t getSize() const { return second; }
  static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }

  /// Return true if offset or size are unknown.
  bool offsetOrSizeAreUnknown() const {
    return getOffset() == OffsetAndSize::Unknown ||
           getSize() == OffsetAndSize::Unknown;
  }

  /// Return true if this offset and size pair might describe an address that
  /// overlaps with \p OAS.
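  ///
  /// E.g. (illustrative), {Offset=4, Size=4} may overlap {Offset=6, Size=4}
  /// since bytes 6 and 7 are covered by both, but not {Offset=8, Size=4}.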
  bool mayOverlap(const OffsetAndSize &OAS) const {
    // Any unknown value and we are giving up -> overlap.
    if (offsetOrSizeAreUnknown() || OAS.offsetOrSizeAreUnknown())
      return true;

    // Check if one offset point is in the other interval [offset, offset+size).
    return OAS.getOffset() + OAS.getSize() > getOffset() &&
           OAS.getOffset() < getOffset() + getSize();
  }

  /// Constant used to represent unknown offsets or sizes.
  static constexpr int64_t Unknown = 1 << 31;
};

/// Implementation of the DenseMapInfo.
///
///{
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getEmptyKey() {
  return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
}
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getTombstoneKey() {
  return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
                nullptr);
}
unsigned llvm::AccessAsInstructionInfo::getHashValue(
    const llvm::AccessAsInstructionInfo::Access &A) {
  return Base::getHashValue(A.getRemoteInst());
}
bool llvm::AccessAsInstructionInfo::isEqual(
    const llvm::AccessAsInstructionInfo::Access &LHS,
    const llvm::AccessAsInstructionInfo::Access &RHS) {
  return LHS.getRemoteInst() == RHS.getRemoteInst();
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
                               nullptr);
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
                               nullptr);
}

unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
  return detail::combineHashValue(
             DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
             (A.isWrittenValueYetUndetermined()
                  ? ~0
                  : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
         A.getKind();
}

bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
  return LHS == RHS;
}
///}

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() {}
  State(const State &SIS) : AccessBins(SIS.AccessBins) {}
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs.size() != RAccs.size())
        return false;
      auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
      while (AccIt != AccE) {
        if (*AccIt != *RAccIt)
          return false;
        ++AccIt;
        ++RAccIt;
      }
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  DenseMap<OffsetAndSize, Accesses> AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \returns CHANGED if the state changed, UNCHANGED otherwise.
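  ///
  /// E.g. (illustrative), adding a second access for the same instruction to
  /// the same bin does not create a new entry; the two are combined via
  /// Access::operator&=, and the result is CHANGED only if the combined
  /// access differs from the existing one.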
  ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
                         Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    OffsetAndSize Key{Offset, Size};
    Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin; if not,
    // simply add it.
    auto It = Bin.find(Acc);
    if (It == Bin.end()) {
      Bin.insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access Before = *It;
    // The new one will be combined with the existing one.
    *It &= Acc;
    return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;
    // First find the offset and size of I.
    OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    for (auto &It : AccessBins) {
      OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
      for (auto &Access : It.getSecond())
        if (!CB(Access, IsExact))
          return false;
    }
    return true;
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};

struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(LI, CB);
  }
  bool forallInterferingAccesses(
      StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(SI, CB);
  }
  bool forallInterferingWrites(
      Attributor &A, const AbstractAttribute &QueryingAA, LoadInst &LI,
      function_ref<bool(const Access &, bool)> UserCB) const override {
    SmallPtrSet<const Access *, 8> DominatingWrites;
    SmallVector<std::pair<const Access *, bool>, 8> InterferingWrites;

    Function &Scope = *LI.getFunction();
    const auto &NoSyncAA = A.getAAFor<AANoSync>(
        QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
    const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
        IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
    const bool NoSync = NoSyncAA.isAssumedNoSync();

    // Helper to determine if we need to consider threading, which we cannot
    // do right now. However, if the function is (assumed) nosync or the only
    // thread executing all instructions is the main thread, we can ignore
    // threading.
    auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
      if (NoSync)
        return true;
      if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
        return true;
      return false;
    };

    // Helper to determine if the access is executed by the same thread as the
    // load; for now it is sufficient to avoid any potential threading effects
    // as we cannot deal with them anyway.
    auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
      return CanIgnoreThreading(*Acc.getLocalInst());
    };

    // TODO: Use inter-procedural reachability and dominance.
    const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
        QueryingAA, IRPosition::function(*LI.getFunction()),
        DepClassTy::OPTIONAL);

    const bool CanUseCFGReasoning = CanIgnoreThreading(LI);
    InformationCache &InfoCache = A.getInfoCache();
    const DominatorTree *DT =
        NoRecurseAA.isKnownNoRecurse()
            ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  Scope)
            : nullptr;

    enum GPUAddressSpace : unsigned {
      Generic = 0,
      Global = 1,
      Shared = 3,
      Constant = 4,
      Local = 5,
    };

    // Helper to check if a value has "kernel lifetime", that is, it will not
    // outlive a GPU kernel. This is true for shared, constant, and local
    // globals on AMD and NVIDIA GPUs.
    auto HasKernelLifetime = [&](Value *V, Module &M) {
      Triple T(M.getTargetTriple());
      if (!(T.isAMDGPU() || T.isNVPTX()))
        return false;
      switch (V->getType()->getPointerAddressSpace()) {
      case GPUAddressSpace::Shared:
      case GPUAddressSpace::Constant:
      case GPUAddressSpace::Local:
        return true;
      default:
        return false;
      }
    };

    // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
    // to determine if we should look at reachability from the callee. For
    // certain pointers we know the lifetime and we do not have to step into the
    // callee to determine reachability as the pointer would be dead in the
    // callee. See the conditional initialization below.
    std::function<bool(const Function &)> IsLiveInCalleeCB;

    if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
      // If the function containing the alloca is not recursive, the alloca
      // must be dead in the callee.
      const Function *AIFn = AI->getFunction();
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
      if (NoRecurseAA.isAssumedNoRecurse()) {
        IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
      }
    } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
      // If the global has kernel lifetime we can stop if we reach a kernel
      // as it is "dead" in the (unknown) callees.
      if (HasKernelLifetime(GV, *GV->getParent()))
        IsLiveInCalleeCB = [](const Function &Fn) {
          return !Fn.hasFnAttribute("kernel");
        };
    }

    auto AccessCB = [&](const Access &Acc, bool Exact) {
      if (!Acc.isWrite())
        return true;

      // For now we only filter accesses based on CFG reasoning, which does
      // not yet work if we have threading effects or the access is
      // complicated.
      if (CanUseCFGReasoning) {
        if (!AA::isPotentiallyReachable(A, *Acc.getLocalInst(), LI, QueryingAA,
                                        IsLiveInCalleeCB))
          return true;
        if (DT && Exact &&
            (Acc.getLocalInst()->getFunction() == LI.getFunction()) &&
            IsSameThreadAsLoad(Acc)) {
          if (DT->dominates(Acc.getLocalInst(), &LI))
            DominatingWrites.insert(&Acc);
        }
      }

      InterferingWrites.push_back({&Acc, Exact});
      return true;
    };
    if (!State::forallInterferingAccesses(LI, AccessCB))
      return false;

    // If we cannot use CFG reasoning, we only filter the non-write accesses
    // and are done here.
    if (!CanUseCFGReasoning) {
      for (auto &It : InterferingWrites)
        if (!UserCB(*It.first, It.second))
          return false;
      return true;
    }

    // Helper to determine if we can skip a specific write access. This is in
    // the worst case quadratic as we are looking for another write that will
    // hide the effect of this one.
    auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
      if (!IsSameThreadAsLoad(Acc))
        return false;
      if (!DominatingWrites.count(&Acc))
        return false;
      for (const Access *DomAcc : DominatingWrites) {
        assert(Acc.getLocalInst()->getFunction() ==
                   DomAcc->getLocalInst()->getFunction() &&
               "Expected dominating writes to be in the same function!");

        if (DomAcc != &Acc &&
            DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
          return true;
        }
      }
      return false;
    };

    // Run the user callback on all writes we cannot skip and return whether
    // that succeeded for all of them.
    unsigned NumInterferingWrites = InterferingWrites.size();
    for (auto &It : InterferingWrites) {
      if (!DT || NumInterferingWrites > MaxInterferingWrites ||
          !CanSkipAccess(*It.first, It.second)) {
        if (!UserCB(*It.first, It.second))
          return false;
      }
    }
    return true;
  }

  ChangeStatus translateAndAddCalleeState(Attributor &A,
                                          const AAPointerInfo &CalleeAA,
                                          int64_t CallArgOffset, CallBase &CB) {
    using namespace AA::PointerInfo;
    if (!CalleeAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
    bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : CalleeImplAA.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (CallArgOffset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
                            It.first.getSize());
      Accesses &Bin = AccessBins[OAS];
      for (const AAPointerInfo::Access &RAcc : It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.translateArgumentToCallSiteContent(
            RAcc.getContent(), CB, *this, UsedAssumedInformation);
        AccessKind AK =
            AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
                                                 : AccessKind::AK_READ_WRITE));
        Changed =
            Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
                                RAcc.getType(), RAcc.getRemoteInst(), &Bin);
      }
    }
    return Changed;
  }

  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};

1307 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1308   using AccessKind = AAPointerInfo::AccessKind;
1309   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1310       : AAPointerInfoImpl(IRP, A) {}
1311 
1312   /// See AbstractAttribute::initialize(...).
1313   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1314 
1315   /// Deal with an access and signal if it was handled successfully.
1316   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1317                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1318                     ChangeStatus &Changed, Type *Ty,
1319                     int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
1320     using namespace AA::PointerInfo;
1321     // No need to find a size if one is given or the offset is unknown.
1322     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1323         Ty) {
1324       const DataLayout &DL = A.getDataLayout();
1325       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1326       if (!AccessSize.isScalable())
1327         Size = AccessSize.getFixedSize();
1328     }
1329     Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
1330     return true;
1331   }
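
  // Example (hypothetical values): for a load of an i64 at a known offset of
  // 16, DL.getTypeStoreSize(i64) is 8 and not scalable, so the access is
  // recorded for the byte range [16, 24); a scalable vector access keeps the
  // unknown size instead.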
1332 
1333   /// Helper struct, will support ranges eventually.
1334   struct OffsetInfo {
1335     int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;
1336 
1337     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1338   };
1339 
1340   /// See AbstractAttribute::updateImpl(...).
1341   ChangeStatus updateImpl(Attributor &A) override {
1342     using namespace AA::PointerInfo;
1343     State S = getState();
1344     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1345     Value &AssociatedValue = getAssociatedValue();
1346 
1347     const DataLayout &DL = A.getDataLayout();
1348     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1349     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1350 
1351     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
1352                                      bool &Follow) {
1353       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1354       UsrOI = PtrOI;
1355       Follow = true;
1356       return true;
1357     };
1358 
1359     const auto *TLI = getAnchorScope()
1360                           ? A.getInfoCache().getTargetLibraryInfoForFunction(
1361                                 *getAnchorScope())
1362                           : nullptr;
1363     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1364       Value *CurPtr = U.get();
1365       User *Usr = U.getUser();
1366       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1367                         << *Usr << "\n");
1368       assert(OffsetInfoMap.count(CurPtr) &&
1369              "The current pointer offset should have been seeded!");
1370 
1371       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1372         if (CE->isCast())
1373           return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1374         if (CE->isCompare())
1375           return true;
1376         if (!isa<GEPOperator>(CE)) {
1377           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1378                             << "\n");
1379           return false;
1380         }
1381       }
1382       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1383         // Note the order here: accessing Usr might change the map, but CurPtr
1384         // is already in it.
1385         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1386         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1387         UsrOI = PtrOI;
1388 
1389         // TODO: Use range information.
1390         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1391             !GEP->hasAllConstantIndices()) {
1392           UsrOI.Offset = OffsetAndSize::Unknown;
1393           Follow = true;
1394           return true;
1395         }
1396 
1397         SmallVector<Value *, 8> Indices;
1398         for (Use &Idx : GEP->indices()) {
1399           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1400             Indices.push_back(CIdx);
1401             continue;
1402           }
1403 
1404           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1405                             << " : " << *Idx << "\n");
1406           return false;
1407         }
1408         UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
1409                                           GEP->getSourceElementType(), Indices);
1410         Follow = true;
1411         return true;
1412       }
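
      // Example (hypothetical IR, assuming 4-byte i32): if %p is known to be
      // at offset 8, then
      //   %q = getelementptr inbounds [4 x i32], [4 x i32]* %p, i64 0, i64 2
      // has all-constant indices and %q is recorded at offset 8 + 8 = 16; a
      // non-constant index would have taken the unknown-offset path above.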
1413       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1414         return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1415 
1416       // For PHIs we need to take care of the recurrence explicitly as the value
1417       // might change while we iterate through a loop. For now, we give up if
1418       // the PHI is not invariant.
1419       if (isa<PHINode>(Usr)) {
1420         // Note the order here: accessing Usr might change the map, but CurPtr
1421         // is already in it.
1422         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1423         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1424         // Check if the PHI is invariant (so far).
1425         if (UsrOI == PtrOI)
1426           return true;
1427 
1428         // Check if the PHI operand already has an unknown offset, as we can't
1429         // improve on that anymore.
1430         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1431           UsrOI = PtrOI;
1432           Follow = true;
1433           return true;
1434         }
1435 
1436         // Check if the PHI operand is not dependent on the PHI itself.
1437         // TODO: This is not great as we look at the pointer type. However, it
1438         // is unclear where the Offset size comes from with typeless pointers.
1439         APInt Offset(
1440             DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1441             0);
1442         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1443                                     DL, Offset, /* AllowNonInbounds */ true)) {
1444           if (Offset != PtrOI.Offset) {
1445             LLVM_DEBUG(dbgs()
1446                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1447                        << *CurPtr << " in " << *Usr << "\n");
1448             return false;
1449           }
1450           return HandlePassthroughUser(Usr, PtrOI, Follow);
1451         }
1452 
1453         // TODO: Approximate in case we know the direction of the recurrence.
1454         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1455                           << *CurPtr << " in " << *Usr << "\n");
1456         UsrOI = PtrOI;
1457         UsrOI.Offset = OffsetAndSize::Unknown;
1458         Follow = true;
1459         return true;
1460       }
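
      // Example (hypothetical IR): for the loop recurrence
      //   %iv = phi i8* [ %base, %entry ], [ %nxt, %loop ]
      //   %nxt = getelementptr i8, i8* %iv, i64 1
      // the incoming %nxt does not strip back to the associated value at a
      // matching constant offset, so the PHI offset becomes unknown above.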
1461 
1462       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1463         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1464                             AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
1465                             Changed, LoadI->getType());
1466       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1467         if (StoreI->getValueOperand() == CurPtr) {
1468           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1469                             << *StoreI << "\n");
1470           return false;
1471         }
1472         bool UsedAssumedInformation = false;
1473         Optional<Value *> Content = A.getAssumedSimplified(
1474             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1475         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1476                             OffsetInfoMap[CurPtr].Offset, Changed,
1477                             StoreI->getValueOperand()->getType());
1478       }
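
      // Example (hypothetical IR): "store i8* %p, i8** %q" stores the analyzed
      // pointer itself and thereby escapes it (rejected above), while
      // "store i8 0, i8* %p" records a write access at %p's current offset.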
1479       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1480         if (CB->isLifetimeStartOrEnd())
1481           return true;
1482         if (TLI && isFreeCall(CB, TLI))
1483           return true;
1484         if (CB->isArgOperand(&U)) {
1485           unsigned ArgNo = CB->getArgOperandNo(&U);
1486           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1487               *this, IRPosition::callsite_argument(*CB, ArgNo),
1488               DepClassTy::REQUIRED);
1489           Changed = translateAndAddCalleeState(
1490                         A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) |
1491                     Changed;
1492           return true;
1493         }
1494         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1495                           << "\n");
1496         // TODO: Allow some call uses
1497         return false;
1498       }
1499 
1500       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1501       return false;
1502     };
1503     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1504       if (OffsetInfoMap.count(NewU))
1505         return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1506       OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1507       return true;
1508     };
1509     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1510                            /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1511                            EquivalentUseCB))
1512       return indicatePessimisticFixpoint();
1513 
1514     LLVM_DEBUG({
1515       dbgs() << "Accesses by bin after update:\n";
1516       for (auto &It : AccessBins) {
1517         dbgs() << "[" << It.first.getOffset() << "-"
1518                << It.first.getOffset() + It.first.getSize()
1519                << "] : " << It.getSecond().size() << "\n";
1520         for (auto &Acc : It.getSecond()) {
1521           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1522                  << "\n";
1523           if (Acc.getLocalInst() != Acc.getRemoteInst())
1524             dbgs() << "     -->                         "
1525                    << *Acc.getRemoteInst() << "\n";
1526           if (!Acc.isWrittenValueYetUndetermined())
1527             dbgs() << "     - " << Acc.getWrittenValue() << "\n";
1528         }
1529       }
1530     });
1531 
1532     return Changed;
1533   }
1534 
1535   /// See AbstractAttribute::trackStatistics()
1536   void trackStatistics() const override {
1537     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1538   }
1539 };
1540 
1541 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1542   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1543       : AAPointerInfoImpl(IRP, A) {}
1544 
1545   /// See AbstractAttribute::updateImpl(...).
1546   ChangeStatus updateImpl(Attributor &A) override {
1547     return indicatePessimisticFixpoint();
1548   }
1549 
1550   /// See AbstractAttribute::trackStatistics()
1551   void trackStatistics() const override {
1552     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1553   }
1554 };
1555 
1556 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1557   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1558       : AAPointerInfoFloating(IRP, A) {}
1559 
1560   /// See AbstractAttribute::initialize(...).
1561   void initialize(Attributor &A) override {
1562     AAPointerInfoFloating::initialize(A);
1563     if (getAnchorScope()->isDeclaration())
1564       indicatePessimisticFixpoint();
1565   }
1566 
1567   /// See AbstractAttribute::trackStatistics()
1568   void trackStatistics() const override {
1569     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1570   }
1571 };
1572 
1573 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1574   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1575       : AAPointerInfoFloating(IRP, A) {}
1576 
1577   /// See AbstractAttribute::updateImpl(...).
1578   ChangeStatus updateImpl(Attributor &A) override {
1579     using namespace AA::PointerInfo;
1580     // We handle memory intrinsics explicitly, at least the first (=
1581     // destination) and second (= source) arguments, as we know how they
1582     // are accessed.
1583     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1584       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1585       int64_t LengthVal = OffsetAndSize::Unknown;
1586       if (Length)
1587         LengthVal = Length->getSExtValue();
1588       Value &Ptr = getAssociatedValue();
1589       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1590       ChangeStatus Changed = ChangeStatus::UNCHANGED;
1591       if (ArgNo == 0) {
1592         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1593                      nullptr, LengthVal);
1594       } else if (ArgNo == 1) {
1595         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1596                      nullptr, LengthVal);
1597       } else {
1598         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1599                           << *MI << "\n");
1600         return indicatePessimisticFixpoint();
1601       }
1602       return Changed;
1603     }
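
    // Example (hypothetical IR): for
    //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 16, i1 false)
    // the destination position gets a [0,16) write and the source position a
    // [0,16) read; a non-constant length leaves the access size unknown.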
1604 
1605     // TODO: Once we have call site specific value information we can provide
1606     //       call site specific liveness information and then it makes
1607     //       sense to specialize attributes for call site arguments instead of
1608     //       redirecting requests to the callee argument.
1609     Argument *Arg = getAssociatedArgument();
1610     if (!Arg)
1611       return indicatePessimisticFixpoint();
1612     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1613     auto &ArgAA =
1614         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1615     return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
1616   }
1617 
1618   /// See AbstractAttribute::trackStatistics()
1619   void trackStatistics() const override {
1620     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1621   }
1622 };
1623 
1624 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1625   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1626       : AAPointerInfoFloating(IRP, A) {}
1627 
1628   /// See AbstractAttribute::trackStatistics()
1629   void trackStatistics() const override {
1630     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1631   }
1632 };
1633 
1634 /// -----------------------NoUnwind Function Attribute--------------------------
1635 
1636 struct AANoUnwindImpl : AANoUnwind {
1637   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1638 
1639   const std::string getAsStr() const override {
1640     return getAssumed() ? "nounwind" : "may-unwind";
1641   }
1642 
1643   /// See AbstractAttribute::updateImpl(...).
1644   ChangeStatus updateImpl(Attributor &A) override {
1645     auto Opcodes = {
1646         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1647         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1648         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1649 
1650     auto CheckForNoUnwind = [&](Instruction &I) {
1651       if (!I.mayThrow())
1652         return true;
1653 
1654       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1655         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1656             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1657         return NoUnwindAA.isAssumedNoUnwind();
1658       }
1659       return false;
1660     };
1661 
1662     bool UsedAssumedInformation = false;
1663     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1664                                    UsedAssumedInformation))
1665       return indicatePessimisticFixpoint();
1666 
1667     return ChangeStatus::UNCHANGED;
1668   }
1669 };
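
// Example (hypothetical IR): a function containing only calls, such as
//   define void @f() {
//     call void @g()
//     ret void
//   }
// is assumed nounwind as long as the call site of @g is assumed nounwind; any
// instruction from the opcode list above that may throw and cannot be shown
// nounwind forces the pessimistic fixpoint.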
1670 
1671 struct AANoUnwindFunction final : public AANoUnwindImpl {
1672   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1673       : AANoUnwindImpl(IRP, A) {}
1674 
1675   /// See AbstractAttribute::trackStatistics()
1676   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1677 };
1678 
1679 /// NoUnwind attribute deduction for a call site.
1680 struct AANoUnwindCallSite final : AANoUnwindImpl {
1681   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1682       : AANoUnwindImpl(IRP, A) {}
1683 
1684   /// See AbstractAttribute::initialize(...).
1685   void initialize(Attributor &A) override {
1686     AANoUnwindImpl::initialize(A);
1687     Function *F = getAssociatedFunction();
1688     if (!F || F->isDeclaration())
1689       indicatePessimisticFixpoint();
1690   }
1691 
1692   /// See AbstractAttribute::updateImpl(...).
1693   ChangeStatus updateImpl(Attributor &A) override {
1694     // TODO: Once we have call site specific value information we can provide
1695     //       call site specific liveness information and then it makes
1696     //       sense to specialize attributes for call site arguments instead of
1697     //       redirecting requests to the callee argument.
1698     Function *F = getAssociatedFunction();
1699     const IRPosition &FnPos = IRPosition::function(*F);
1700     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1701     return clampStateAndIndicateChange(getState(), FnAA.getState());
1702   }
1703 
1704   /// See AbstractAttribute::trackStatistics()
1705   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1706 };
1707 
1708 /// --------------------- Function Return Values -------------------------------
1709 
1710 /// "Attribute" that collects all potential returned values and the return
1711 /// instructions that they arise from.
1712 ///
1713 /// If there is a unique returned value R, the manifest method will:
1714 ///   - mark R with the "returned" attribute, if R is an argument.
1715 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1716 
1717   /// Mapping of values potentially returned by the associated function to the
1718   /// return instructions that might return them.
1719   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1720 
1721   /// State flags
1722   ///
1723   ///{
1724   bool IsFixed = false;
1725   bool IsValidState = true;
1726   ///}
1727 
1728 public:
1729   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1730       : AAReturnedValues(IRP, A) {}
1731 
1732   /// See AbstractAttribute::initialize(...).
1733   void initialize(Attributor &A) override {
1734     // Reset the state.
1735     IsFixed = false;
1736     IsValidState = true;
1737     ReturnedValues.clear();
1738 
1739     Function *F = getAssociatedFunction();
1740     if (!F || F->isDeclaration()) {
1741       indicatePessimisticFixpoint();
1742       return;
1743     }
1744     assert(!F->getReturnType()->isVoidTy() &&
1745            "Did not expect a void return type!");
1746 
1747     // The map from instruction opcodes to those instructions in the function.
1748     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1749 
1750     // Look through all arguments, if one is marked as returned we are done.
1751     for (Argument &Arg : F->args()) {
1752       if (Arg.hasReturnedAttr()) {
1753         auto &ReturnInstSet = ReturnedValues[&Arg];
1754         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1755           for (Instruction *RI : *Insts)
1756             ReturnInstSet.insert(cast<ReturnInst>(RI));
1757 
1758         indicateOptimisticFixpoint();
1759         return;
1760       }
1761     }
1762 
1763     if (!A.isFunctionIPOAmendable(*F))
1764       indicatePessimisticFixpoint();
1765   }
1766 
1767   /// See AbstractAttribute::manifest(...).
1768   ChangeStatus manifest(Attributor &A) override;
1769 
1770   /// See AbstractAttribute::getState(...).
1771   AbstractState &getState() override { return *this; }
1772 
1773   /// See AbstractAttribute::getState(...).
1774   const AbstractState &getState() const override { return *this; }
1775 
1776   /// See AbstractAttribute::updateImpl(Attributor &A).
1777   ChangeStatus updateImpl(Attributor &A) override;
1778 
1779   llvm::iterator_range<iterator> returned_values() override {
1780     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1781   }
1782 
1783   llvm::iterator_range<const_iterator> returned_values() const override {
1784     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1785   }
1786 
1787   /// Return the number of potential return values, -1 if unknown.
1788   size_t getNumReturnValues() const override {
1789     return isValidState() ? ReturnedValues.size() : -1;
1790   }
1791 
1792   /// Return an assumed unique return value if a single candidate is found. If
1793   /// there cannot be one, return nullptr. If it is not clear yet, return
1794   /// Optional::NoneType.
1795   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1796 
1797   /// See AbstractState::checkForAllReturnedValues(...).
1798   bool checkForAllReturnedValuesAndReturnInsts(
1799       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1800       const override;
1801 
1802   /// Pretty print the attribute similar to the IR representation.
1803   const std::string getAsStr() const override;
1804 
1805   /// See AbstractState::isAtFixpoint().
1806   bool isAtFixpoint() const override { return IsFixed; }
1807 
1808   /// See AbstractState::isValidState().
1809   bool isValidState() const override { return IsValidState; }
1810 
1811   /// See AbstractState::indicateOptimisticFixpoint(...).
1812   ChangeStatus indicateOptimisticFixpoint() override {
1813     IsFixed = true;
1814     return ChangeStatus::UNCHANGED;
1815   }
1816 
1817   ChangeStatus indicatePessimisticFixpoint() override {
1818     IsFixed = true;
1819     IsValidState = false;
1820     return ChangeStatus::CHANGED;
1821   }
1822 };
1823 
1824 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1825   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1826 
1827   // Bookkeeping.
1828   assert(isValidState());
1829   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1830                   "Number of function with known return values");
1831 
1832   // Check if we have an assumed unique return value that we could manifest.
1833   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1834 
1835   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1836     return Changed;
1837 
1838   // Bookkeeping.
1839   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1840                   "Number of function with unique return");
1841   // If the assumed unique return value is an argument, annotate it.
1842   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1843     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1844             getAssociatedFunction()->getReturnType())) {
1845       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1846       Changed = IRAttribute::manifest(A);
1847     }
1848   }
1849   return Changed;
1850 }
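
// Example (hypothetical IR): for
//   define i8* @passthrough(i8* %p) {
//     ret i8* %p
//   }
// the assumed unique return value is the argument %p, so manifesting moves
// the position to the argument and adds the attribute:
//   define i8* @passthrough(i8* returned %p)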
1851 
1852 const std::string AAReturnedValuesImpl::getAsStr() const {
1853   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1854          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1855 }
1856 
1857 Optional<Value *>
1858 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1859   // If checkForAllReturnedValues provides a unique value, ignoring potential
1860   // undef values that can also be present, it is assumed to be the actual
1861   // return value and forwarded to the caller of this method. If there are
1862   // multiple, a nullptr is returned indicating there cannot be a unique
1863   // returned value.
1864   Optional<Value *> UniqueRV;
1865   Type *Ty = getAssociatedFunction()->getReturnType();
1866 
1867   auto Pred = [&](Value &RV) -> bool {
1868     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1869     return UniqueRV != Optional<Value *>(nullptr);
1870   };
1871 
1872   if (!A.checkForAllReturnedValues(Pred, *this))
1873     UniqueRV = nullptr;
1874 
1875   return UniqueRV;
1876 }
1877 
1878 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1879     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1880     const {
1881   if (!isValidState())
1882     return false;
1883 
1884   // Check all returned values but ignore call sites as long as we have not
1885   // encountered an overdefined one during an update.
1886   for (auto &It : ReturnedValues) {
1887     Value *RV = It.first;
1888     if (!Pred(*RV, It.second))
1889       return false;
1890   }
1891 
1892   return true;
1893 }
1894 
1895 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1896   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1897 
1898   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1899                            bool) -> bool {
1900     assert(AA::isValidInScope(V, Ret.getFunction()) &&
1901            "Assumed returned value should be valid in function scope!");
1902     if (ReturnedValues[&V].insert(&Ret))
1903       Changed = ChangeStatus::CHANGED;
1904     return true;
1905   };
1906 
1907   bool UsedAssumedInformation = false;
1908   auto ReturnInstCB = [&](Instruction &I) {
1909     ReturnInst &Ret = cast<ReturnInst>(I);
1910     return genericValueTraversal<ReturnInst>(
1911         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1912         &I, UsedAssumedInformation, /* UseValueSimplify */ true,
1913         /* MaxValues */ 16,
1914         /* StripCB */ nullptr, /* Intraprocedural */ true);
1915   };
1916 
1917   // Discover returned values from all live returned instructions in the
1918   // associated function.
1919   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1920                                  UsedAssumedInformation))
1921     return indicatePessimisticFixpoint();
1922   return Changed;
1923 }
1924 
1925 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1926   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1927       : AAReturnedValuesImpl(IRP, A) {}
1928 
1929   /// See AbstractAttribute::trackStatistics()
1930   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1931 };
1932 
1933 /// Returned values information for a call site.
1934 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1935   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1936       : AAReturnedValuesImpl(IRP, A) {}
1937 
1938   /// See AbstractAttribute::initialize(...).
1939   void initialize(Attributor &A) override {
1940     // TODO: Once we have call site specific value information we can provide
1941     //       call site specific liveness information and then it makes
1942     //       sense to specialize attributes for call sites instead of
1943     //       redirecting requests to the callee.
1944     llvm_unreachable("Abstract attributes for returned values are not "
1945                      "supported for call sites yet!");
1946   }
1947 
1948   /// See AbstractAttribute::updateImpl(...).
1949   ChangeStatus updateImpl(Attributor &A) override {
1950     return indicatePessimisticFixpoint();
1951   }
1952 
1953   /// See AbstractAttribute::trackStatistics()
1954   void trackStatistics() const override {}
1955 };
1956 
1957 /// ------------------------ NoSync Function Attribute -------------------------
1958 
1959 struct AANoSyncImpl : AANoSync {
1960   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1961 
1962   const std::string getAsStr() const override {
1963     return getAssumed() ? "nosync" : "may-sync";
1964   }
1965 
1966   /// See AbstractAttribute::updateImpl(...).
1967   ChangeStatus updateImpl(Attributor &A) override;
1968 };
1969 
1970 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
1971   if (!I->isAtomic())
1972     return false;
1973 
1974   if (auto *FI = dyn_cast<FenceInst>(I))
1975     // All legal orderings for fence are stronger than monotonic.
1976     return FI->getSyncScopeID() != SyncScope::SingleThread;
1977   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1978     // Unordered is not a legal ordering for cmpxchg.
1979     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1980             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1981   }
1982 
1983   AtomicOrdering Ordering;
1984   switch (I->getOpcode()) {
1985   case Instruction::AtomicRMW:
1986     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1987     break;
1988   case Instruction::Store:
1989     Ordering = cast<StoreInst>(I)->getOrdering();
1990     break;
1991   case Instruction::Load:
1992     Ordering = cast<LoadInst>(I)->getOrdering();
1993     break;
1994   default:
1995     llvm_unreachable(
1996         "New atomic operations need to be known in the attributor.");
1997   }
1998 
1999   return (Ordering != AtomicOrdering::Unordered &&
2000           Ordering != AtomicOrdering::Monotonic);
2001 }
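
// Example (hypothetical IR): "load atomic i32, i32* %p acquire, align 4" is
// non-relaxed and thus potentially synchronizing, whereas a monotonic or
// unordered atomic load is not considered synchronizing here.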
2002 
2003 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
2004 /// which would be nosync except that they have a volatile flag.  All other
2005 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
2006 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
2007   if (auto *MI = dyn_cast<MemIntrinsic>(I))
2008     return !MI->isVolatile();
2009   return false;
2010 }
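
// Example: a non-volatile llvm.memcpy is treated as nosync here, while the
// same intrinsic with its volatile flag set is not.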
2011 
2012 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
2013 
2014   auto CheckRWInstForNoSync = [&](Instruction &I) {
2015     return AA::isNoSyncInst(A, I, *this);
2016   };
2017 
2018   auto CheckForNoSync = [&](Instruction &I) {
2019     // At this point we handled all read/write effects and they are all
2020     // nosync, so they can be skipped.
2021     if (I.mayReadOrWriteMemory())
2022       return true;
2023 
2024     // Non-convergent and readnone imply nosync.
2025     return !cast<CallBase>(I).isConvergent();
2026   };
2027 
2028   bool UsedAssumedInformation = false;
2029   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
2030                                           UsedAssumedInformation) ||
2031       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
2032                                          UsedAssumedInformation))
2033     return indicatePessimisticFixpoint();
2034 
2035   return ChangeStatus::UNCHANGED;
2036 }
2037 
2038 struct AANoSyncFunction final : public AANoSyncImpl {
2039   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
2040       : AANoSyncImpl(IRP, A) {}
2041 
2042   /// See AbstractAttribute::trackStatistics()
2043   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
2044 };
2045 
2046 /// NoSync attribute deduction for a call site.
2047 struct AANoSyncCallSite final : AANoSyncImpl {
2048   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
2049       : AANoSyncImpl(IRP, A) {}
2050 
2051   /// See AbstractAttribute::initialize(...).
2052   void initialize(Attributor &A) override {
2053     AANoSyncImpl::initialize(A);
2054     Function *F = getAssociatedFunction();
2055     if (!F || F->isDeclaration())
2056       indicatePessimisticFixpoint();
2057   }
2058 
2059   /// See AbstractAttribute::updateImpl(...).
2060   ChangeStatus updateImpl(Attributor &A) override {
2061     // TODO: Once we have call site specific value information we can provide
2062     //       call site specific liveness information and then it makes
2063     //       sense to specialize attributes for call site arguments instead of
2064     //       redirecting requests to the callee argument.
2065     Function *F = getAssociatedFunction();
2066     const IRPosition &FnPos = IRPosition::function(*F);
2067     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
2068     return clampStateAndIndicateChange(getState(), FnAA.getState());
2069   }
2070 
2071   /// See AbstractAttribute::trackStatistics()
2072   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
2073 };
2074 
2075 /// ------------------------ No-Free Attributes ----------------------------
2076 
2077 struct AANoFreeImpl : public AANoFree {
2078   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
2079 
2080   /// See AbstractAttribute::updateImpl(...).
2081   ChangeStatus updateImpl(Attributor &A) override {
2082     auto CheckForNoFree = [&](Instruction &I) {
2083       const auto &CB = cast<CallBase>(I);
2084       if (CB.hasFnAttr(Attribute::NoFree))
2085         return true;
2086 
2087       const auto &NoFreeAA = A.getAAFor<AANoFree>(
2088           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2089       return NoFreeAA.isAssumedNoFree();
2090     };
2091 
2092     bool UsedAssumedInformation = false;
2093     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2094                                            UsedAssumedInformation))
2095       return indicatePessimisticFixpoint();
2096     return ChangeStatus::UNCHANGED;
2097   }
2098 
2099   /// See AbstractAttribute::getAsStr().
2100   const std::string getAsStr() const override {
2101     return getAssumed() ? "nofree" : "may-free";
2102   }
2103 };
2104 
2105 struct AANoFreeFunction final : public AANoFreeImpl {
2106   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2107       : AANoFreeImpl(IRP, A) {}
2108 
2109   /// See AbstractAttribute::trackStatistics()
2110   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2111 };
2112 
2113 /// NoFree attribute deduction for a call site.
2114 struct AANoFreeCallSite final : AANoFreeImpl {
2115   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2116       : AANoFreeImpl(IRP, A) {}
2117 
2118   /// See AbstractAttribute::initialize(...).
2119   void initialize(Attributor &A) override {
2120     AANoFreeImpl::initialize(A);
2121     Function *F = getAssociatedFunction();
2122     if (!F || F->isDeclaration())
2123       indicatePessimisticFixpoint();
2124   }
2125 
2126   /// See AbstractAttribute::updateImpl(...).
2127   ChangeStatus updateImpl(Attributor &A) override {
2128     // TODO: Once we have call site specific value information we can provide
2129     //       call site specific liveness information and then it makes
2130     //       sense to specialize attributes for call site arguments instead of
2131     //       redirecting requests to the callee argument.
2132     Function *F = getAssociatedFunction();
2133     const IRPosition &FnPos = IRPosition::function(*F);
2134     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2135     return clampStateAndIndicateChange(getState(), FnAA.getState());
2136   }
2137 
2138   /// See AbstractAttribute::trackStatistics()
2139   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2140 };
2141 
2142 /// NoFree attribute for floating values.
2143 struct AANoFreeFloating : AANoFreeImpl {
2144   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2145       : AANoFreeImpl(IRP, A) {}
2146 
2147   /// See AbstractAttribute::trackStatistics()
2148   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }
2149 
2150   /// See AbstractAttribute::updateImpl(...).
2151   ChangeStatus updateImpl(Attributor &A) override {
2152     const IRPosition &IRP = getIRPosition();
2153 
2154     const auto &NoFreeAA = A.getAAFor<AANoFree>(
2155         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2156     if (NoFreeAA.isAssumedNoFree())
2157       return ChangeStatus::UNCHANGED;
2158 
2159     Value &AssociatedValue = getIRPosition().getAssociatedValue();
2160     auto Pred = [&](const Use &U, bool &Follow) -> bool {
2161       Instruction *UserI = cast<Instruction>(U.getUser());
2162       if (auto *CB = dyn_cast<CallBase>(UserI)) {
2163         if (CB->isBundleOperand(&U))
2164           return false;
2165         if (!CB->isArgOperand(&U))
2166           return true;
2167         unsigned ArgNo = CB->getArgOperandNo(&U);
2168 
2169         const auto &NoFreeArg = A.getAAFor<AANoFree>(
2170             *this, IRPosition::callsite_argument(*CB, ArgNo),
2171             DepClassTy::REQUIRED);
2172         return NoFreeArg.isAssumedNoFree();
2173       }
2174 
2175       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2176           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2177         Follow = true;
2178         return true;
2179       }
2180       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2181           isa<ReturnInst>(UserI))
2182         return true;
2183 
2184       // Unknown user.
2185       return false;
2186     };
2187     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2188       return indicatePessimisticFixpoint();
2189 
2190     return ChangeStatus::UNCHANGED;
2191   }
2192 };
2193 
2194 /// NoFree attribute for a function argument.
2195 struct AANoFreeArgument final : AANoFreeFloating {
2196   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2197       : AANoFreeFloating(IRP, A) {}
2198 
2199   /// See AbstractAttribute::trackStatistics()
2200   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2201 };
2202 
2203 /// NoFree attribute for call site arguments.
2204 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2205   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2206       : AANoFreeFloating(IRP, A) {}
2207 
2208   /// See AbstractAttribute::updateImpl(...).
2209   ChangeStatus updateImpl(Attributor &A) override {
2210     // TODO: Once we have call site specific value information we can provide
2211     //       call site specific liveness information and then it makes
2212     //       sense to specialize attributes for call site arguments instead of
2213     //       redirecting requests to the callee argument.
2214     Argument *Arg = getAssociatedArgument();
2215     if (!Arg)
2216       return indicatePessimisticFixpoint();
2217     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2218     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2219     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2220   }
2221 
2222   /// See AbstractAttribute::trackStatistics()
2223   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
2224 };
2225 
2226 /// NoFree attribute for function return value.
2227 struct AANoFreeReturned final : AANoFreeFloating {
2228   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2229       : AANoFreeFloating(IRP, A) {
2230     llvm_unreachable("NoFree is not applicable to function returns!");
2231   }
2232 
2233   /// See AbstractAttribute::initialize(...).
2234   void initialize(Attributor &A) override {
2235     llvm_unreachable("NoFree is not applicable to function returns!");
2236   }
2237 
2238   /// See AbstractAttribute::updateImpl(...).
2239   ChangeStatus updateImpl(Attributor &A) override {
2240     llvm_unreachable("NoFree is not applicable to function returns!");
2241   }
2242 
2243   /// See AbstractAttribute::trackStatistics()
2244   void trackStatistics() const override {}
2245 };
2246 
2247 /// NoFree attribute deduction for a call site return value.
2248 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2249   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2250       : AANoFreeFloating(IRP, A) {}
2251 
2252   ChangeStatus manifest(Attributor &A) override {
2253     return ChangeStatus::UNCHANGED;
2254   }
2255   /// See AbstractAttribute::trackStatistics()
2256   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2257 };
2258 
2259 /// ------------------------ NonNull Argument Attribute ------------------------
2260 static int64_t getKnownNonNullAndDerefBytesForUse(
2261     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2262     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2263   TrackUse = false;
2264 
2265   const Value *UseV = U->get();
2266   if (!UseV->getType()->isPointerTy())
2267     return 0;
2268 
2269   // We need to follow common pointer manipulation uses to the accesses they
2270   // feed into. We try to be smart and avoid looking through things we do not
2271   // like for now, e.g., non-inbounds GEPs.
2272   if (isa<CastInst>(I)) {
2273     TrackUse = true;
2274     return 0;
2275   }
2276 
2277   if (isa<GetElementPtrInst>(I)) {
2278     TrackUse = true;
2279     return 0;
2280   }
2281 
2282   Type *PtrTy = UseV->getType();
2283   const Function *F = I->getFunction();
2284   bool NullPointerIsDefined =
2285       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2286   const DataLayout &DL = A.getInfoCache().getDL();
2287   if (const auto *CB = dyn_cast<CallBase>(I)) {
2288     if (CB->isBundleOperand(U)) {
2289       if (RetainedKnowledge RK = getKnowledgeFromUse(
2290               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2291         IsNonNull |=
2292             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2293         return RK.ArgValue;
2294       }
2295       return 0;
2296     }
2297 
2298     if (CB->isCallee(U)) {
2299       IsNonNull |= !NullPointerIsDefined;
2300       return 0;
2301     }
2302 
2303     unsigned ArgNo = CB->getArgOperandNo(U);
2304     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2305     // As long as we only use known information there is no need to track
2306     // dependences here.
2307     auto &DerefAA =
2308         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2309     IsNonNull |= DerefAA.isKnownNonNull();
2310     return DerefAA.getKnownDereferenceableBytes();
2311   }
2312 
2313   Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2314   if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2315     return 0;
2316 
2317   int64_t Offset;
2318   const Value *Base =
2319       getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2320   if (Base && Base == &AssociatedValue) {
2321     int64_t DerefBytes = Loc->Size.getValue() + Offset;
2322     IsNonNull |= !NullPointerIsDefined;
2323     return std::max(int64_t(0), DerefBytes);
2324   }
2325 
2326   // Corner case when an offset is 0.
2327   Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2328                                           /*AllowNonInbounds*/ true);
2329   if (Base && Base == &AssociatedValue && Offset == 0) {
2330     int64_t DerefBytes = Loc->Size.getValue();
2331     IsNonNull |= !NullPointerIsDefined;
2332     return std::max(int64_t(0), DerefBytes);
2333   }
2334 
2335   return 0;
2336 }
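
// Example (hypothetical IR, assuming 8-byte i64): for
//   %q = getelementptr inbounds i64, i64* %p, i64 1
//   store i64 0, i64* %q
// the GEP use is tracked and the 8-byte store at offset 8 implies %p is
// dereferenceable(16); where null is not a defined address, it also implies
// %p is nonnull.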
2337 
2338 struct AANonNullImpl : AANonNull {
2339   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2340       : AANonNull(IRP, A),
2341         NullIsDefined(NullPointerIsDefined(
2342             getAnchorScope(),
2343             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2344 
2345   /// See AbstractAttribute::initialize(...).
2346   void initialize(Attributor &A) override {
2347     Value &V = getAssociatedValue();
2348     if (!NullIsDefined &&
2349         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2350                 /* IgnoreSubsumingPositions */ false, &A)) {
2351       indicateOptimisticFixpoint();
2352       return;
2353     }
2354 
2355     if (isa<ConstantPointerNull>(V)) {
2356       indicatePessimisticFixpoint();
2357       return;
2358     }
2359 
2360     AANonNull::initialize(A);
2361 
2362     bool CanBeNull, CanBeFreed;
2363     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2364                                          CanBeFreed)) {
2365       if (!CanBeNull) {
2366         indicateOptimisticFixpoint();
2367         return;
2368       }
2369     }
2370 
2371     if (isa<GlobalValue>(&getAssociatedValue())) {
2372       indicatePessimisticFixpoint();
2373       return;
2374     }
2375 
2376     if (Instruction *CtxI = getCtxI())
2377       followUsesInMBEC(*this, A, getState(), *CtxI);
2378   }
2379 
2380   /// See followUsesInMBEC
2381   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2382                        AANonNull::StateType &State) {
2383     bool IsNonNull = false;
2384     bool TrackUse = false;
2385     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2386                                        IsNonNull, TrackUse);
2387     State.setKnown(IsNonNull);
2388     return TrackUse;
2389   }
2390 
2391   /// See AbstractAttribute::getAsStr().
2392   const std::string getAsStr() const override {
2393     return getAssumed() ? "nonnull" : "may-null";
2394   }
2395 
2396   /// Flag to determine if the underlying value can be null and still allow
2397   /// valid accesses.
2398   const bool NullIsDefined;
2399 };
2400 
2401 /// NonNull attribute for a floating value.
2402 struct AANonNullFloating : public AANonNullImpl {
2403   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2404       : AANonNullImpl(IRP, A) {}
2405 
2406   /// See AbstractAttribute::updateImpl(...).
2407   ChangeStatus updateImpl(Attributor &A) override {
2408     const DataLayout &DL = A.getDataLayout();
2409 
2410     DominatorTree *DT = nullptr;
2411     AssumptionCache *AC = nullptr;
2412     InformationCache &InfoCache = A.getInfoCache();
2413     if (const Function *Fn = getAnchorScope()) {
2414       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2415       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2416     }
2417 
2418     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2419                             AANonNull::StateType &T, bool Stripped) -> bool {
2420       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2421                                              DepClassTy::REQUIRED);
2422       if (!Stripped && this == &AA) {
2423         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2424           T.indicatePessimisticFixpoint();
2425       } else {
2426         // Use abstract attribute information.
2427         const AANonNull::StateType &NS = AA.getState();
2428         T ^= NS;
2429       }
2430       return T.isValidState();
2431     };
2432 
2433     StateType T;
2434     bool UsedAssumedInformation = false;
2435     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2436                                           VisitValueCB, getCtxI(),
2437                                           UsedAssumedInformation))
2438       return indicatePessimisticFixpoint();
2439 
2440     return clampStateAndIndicateChange(getState(), T);
2441   }
2442 
2443   /// See AbstractAttribute::trackStatistics()
2444   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2445 };
2446 
2447 /// NonNull attribute for function return value.
2448 struct AANonNullReturned final
2449     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2450   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2451       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2452 
2453   /// See AbstractAttribute::getAsStr().
2454   const std::string getAsStr() const override {
2455     return getAssumed() ? "nonnull" : "may-null";
2456   }
2457 
2458   /// See AbstractAttribute::trackStatistics()
2459   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2460 };
2461 
2462 /// NonNull attribute for function argument.
2463 struct AANonNullArgument final
2464     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2465   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2466       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2467 
2468   /// See AbstractAttribute::trackStatistics()
2469   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2470 };
2471 
2472 struct AANonNullCallSiteArgument final : AANonNullFloating {
2473   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2474       : AANonNullFloating(IRP, A) {}
2475 
2476   /// See AbstractAttribute::trackStatistics()
2477   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2478 };
2479 
2480 /// NonNull attribute for a call site return position.
2481 struct AANonNullCallSiteReturned final
2482     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2483   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2484       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2485 
2486   /// See AbstractAttribute::trackStatistics()
2487   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2488 };
2489 
2490 /// ------------------------ No-Recurse Attributes ----------------------------
2491 
2492 struct AANoRecurseImpl : public AANoRecurse {
2493   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2494 
2495   /// See AbstractAttribute::getAsStr()
2496   const std::string getAsStr() const override {
2497     return getAssumed() ? "norecurse" : "may-recurse";
2498   }
2499 };
2500 
2501 struct AANoRecurseFunction final : AANoRecurseImpl {
2502   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2503       : AANoRecurseImpl(IRP, A) {}
2504 
2505   /// See AbstractAttribute::updateImpl(...).
2506   ChangeStatus updateImpl(Attributor &A) override {
2507 
2508     // If all live call sites are known to be no-recurse, we are as well.
2509     auto CallSitePred = [&](AbstractCallSite ACS) {
2510       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2511           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2512           DepClassTy::NONE);
2513       return NoRecurseAA.isKnownNoRecurse();
2514     };
2515     bool UsedAssumedInformation = false;
2516     if (A.checkForAllCallSites(CallSitePred, *this, true,
2517                                UsedAssumedInformation)) {
2518       // If we know all call sites and all are known no-recurse, we are done.
2519       // If all known call sites, which might not be all that exist, are known
2520       // to be no-recurse, we are not done but we can continue to assume
2521       // no-recurse. If one of the call sites we have not visited becomes live,
2522       // another update is triggered.
2523       if (!UsedAssumedInformation)
2524         indicateOptimisticFixpoint();
2525       return ChangeStatus::UNCHANGED;
2526     }
2527 
2528     const AAFunctionReachability &EdgeReachability =
2529         A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2530                                            DepClassTy::REQUIRED);
2531     if (EdgeReachability.canReach(A, *getAnchorScope()))
2532       return indicatePessimisticFixpoint();
2533     return ChangeStatus::UNCHANGED;
2534   }
2535 
2536   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2537 };
2538 
2539 /// NoRecurse attribute deduction for a call site.
2540 struct AANoRecurseCallSite final : AANoRecurseImpl {
2541   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2542       : AANoRecurseImpl(IRP, A) {}
2543 
2544   /// See AbstractAttribute::initialize(...).
2545   void initialize(Attributor &A) override {
2546     AANoRecurseImpl::initialize(A);
2547     Function *F = getAssociatedFunction();
2548     if (!F || F->isDeclaration())
2549       indicatePessimisticFixpoint();
2550   }
2551 
2552   /// See AbstractAttribute::updateImpl(...).
2553   ChangeStatus updateImpl(Attributor &A) override {
2554     // TODO: Once we have call site specific value information we can provide
2555     //       call site specific liveness information and then it makes
2556     //       sense to specialize attributes for call site arguments instead of
2557     //       redirecting requests to the callee argument.
2558     Function *F = getAssociatedFunction();
2559     const IRPosition &FnPos = IRPosition::function(*F);
2560     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2561     return clampStateAndIndicateChange(getState(), FnAA.getState());
2562   }
2563 
2564   /// See AbstractAttribute::trackStatistics()
2565   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2566 };
2567 
2568 /// -------------------- Undefined-Behavior Attributes ------------------------
2569 
2570 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2571   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2572       : AAUndefinedBehavior(IRP, A) {}
2573 
2574   /// See AbstractAttribute::updateImpl(...).
2576   ChangeStatus updateImpl(Attributor &A) override {
2577     const size_t UBPrevSize = KnownUBInsts.size();
2578     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2579 
2580     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2581       // The LangRef now states that volatile stores are not UB, so skip them.
2582       if (I.isVolatile() && I.mayWriteToMemory())
2583         return true;
2584 
2585       // Skip instructions that are already saved.
2586       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2587         return true;
2588 
2589       // If we reach here, we know we have an instruction
2590       // that accesses memory through a pointer operand,
2591       // which getPointerOperand() should give us.
2592       Value *PtrOp =
2593           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2594       assert(PtrOp &&
2595              "Expected pointer operand of memory accessing instruction");
2596 
2597       // Either we stopped and the appropriate action was taken,
2598       // or we got back a simplified value to continue.
2599       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2600       if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2601         return true;
2602       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2603 
2604       // A memory access through a pointer is considered UB
2605       // only if the pointer has constant null value.
2606       // TODO: Expand it to not only check constant values.
2607       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2608         AssumedNoUBInsts.insert(&I);
2609         return true;
2610       }
2611       const Type *PtrTy = PtrOpVal->getType();
2612 
2613       // Because we only consider instructions inside functions,
2614       // assume that a parent function exists.
2615       const Function *F = I.getFunction();
2616 
2617       // A memory access using a constant null pointer is only considered UB
2618       // if the null pointer is _not_ defined for the target platform.
2619       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2620         AssumedNoUBInsts.insert(&I);
2621       else
2622         KnownUBInsts.insert(&I);
2623       return true;
2624     };
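
    // Example (hypothetical IR): "store i32 0, i32* null" is known UB in a
    // function where the null address is not defined; with
    // null-pointer-is-valid semantics the same store is assumed not to be UB.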
2625 
2626     auto InspectBrInstForUB = [&](Instruction &I) {
2627       // A conditional branch instruction is considered UB if it has an
2628       // `undef` condition.
2629 
2630       // Skip instructions that are already saved.
2631       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2632         return true;
2633 
2634       // We know we have a branch instruction.
2635       auto *BrInst = cast<BranchInst>(&I);
2636 
2637       // Unconditional branches are never considered UB.
2638       if (BrInst->isUnconditional())
2639         return true;
2640 
2641       // Either we stopped and the appropriate action was taken,
2642       // or we got back a simplified value to continue.
2643       Optional<Value *> SimplifiedCond =
2644           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2645       if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2646         return true;
2647       AssumedNoUBInsts.insert(&I);
2648       return true;
2649     };
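
    // Example (hypothetical IR): "br i1 undef, label %a, label %b" is the
    // pattern targeted here; the undef condition is caught by
    // stopOnUndefOrAssumed.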
2650 
2651     auto InspectCallSiteForUB = [&](Instruction &I) {
2652       // Check whether a call site always causes UB.
2653 
2654       // Skip instructions that are already saved.
2655       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2656         return true;
2657 
2658       // Check nonnull and noundef argument attribute violation for each
2659       // callsite.
2660       CallBase &CB = cast<CallBase>(I);
2661       Function *Callee = CB.getCalledFunction();
2662       if (!Callee)
2663         return true;
2664       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2665         // If the current argument is known to be simplified to a null pointer
2666         // and the corresponding argument position is known to have the nonnull
2667         // attribute, the argument is poison. Furthermore, if the argument is
2668         // poison and the position is known to have the noundef attribute, this
2669         // call site is considered UB.
2670         if (idx >= Callee->arg_size())
2671           break;
2672         Value *ArgVal = CB.getArgOperand(idx);
2673         if (!ArgVal)
2674           continue;
2675         // Here, we handle three cases.
2676         //   (1) Not having a value means it is dead (we can replace the value
2677         //       with undef).
2678         //   (2) Simplified to undef. The argument violates the noundef attribute.
2679         //   (3) Simplified to a null pointer where known to be nonnull.
2680         //       The argument is a poison value and violates the noundef attribute.
2681         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2682         auto &NoUndefAA =
2683             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2684         if (!NoUndefAA.isKnownNoUndef())
2685           continue;
2686         bool UsedAssumedInformation = false;
2687         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2688             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2689         if (UsedAssumedInformation)
2690           continue;
2691         if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2692           return true;
2693         if (!SimplifiedVal.hasValue() ||
2694             isa<UndefValue>(*SimplifiedVal.getValue())) {
2695           KnownUBInsts.insert(&I);
2696           continue;
2697         }
2698         if (!ArgVal->getType()->isPointerTy() ||
2699             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2700           continue;
2701         auto &NonNullAA =
2702             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2703         if (NonNullAA.isKnownNonNull())
2704           KnownUBInsts.insert(&I);
2705       }
2706       return true;
2707     };
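    // An IR sketch of the callsite rule (with a hypothetical callee @f):
    // passing null where the parameter is both nonnull and noundef makes the
    // call known UB.
    //
    //   declare void @f(i32* nonnull noundef)
    //   call void @f(i32* null)    ; known UB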
2708 
2709     auto InspectReturnInstForUB = [&](Instruction &I) {
2710       auto &RI = cast<ReturnInst>(I);
2711       // Either we stopped and the appropriate action was taken,
2712       // or we got back a simplified return value to continue.
2713       Optional<Value *> SimplifiedRetValue =
2714           stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2715       if (!SimplifiedRetValue.hasValue() || !SimplifiedRetValue.getValue())
2716         return true;
2717 
2718       // Check if a return instruction always causes UB.
2719       // Note: It is guaranteed that the returned position of the anchor
2720       //       scope has the noundef attribute when this is called.
2721       //       We also ensure the return position is not "assumed dead"
2722       //       because the returned value was then potentially simplified to
2723       //       `undef` in AAReturnedValues without removing the `noundef`
2724       //       attribute yet.
2725 
2726       // When the returned position has the noundef attribute, UB occurs in the
2727       // following cases.
2728       //   (1) Returned value is known to be undef.
2729       //   (2) The value is known to be a null pointer and the returned
2730       //       position has nonnull attribute (because the returned value is
2731       //       poison).
2732       if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2733         auto &NonNullAA = A.getAAFor<AANonNull>(
2734             *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2735         if (NonNullAA.isKnownNonNull())
2736           KnownUBInsts.insert(&I);
2737       }
2738 
2739       return true;
2740     };
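    // An IR sketch of the return rule (hypothetical function @g): returning
    // null through a nonnull+noundef returned position yields poison where
    // undef is not allowed, hence known UB.
    //
    //   define nonnull noundef i32* @g() {
    //     ret i32* null    ; known UB
    //   }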
2741 
2742     bool UsedAssumedInformation = false;
2743     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2744                               {Instruction::Load, Instruction::Store,
2745                                Instruction::AtomicCmpXchg,
2746                                Instruction::AtomicRMW},
2747                               UsedAssumedInformation,
2748                               /* CheckBBLivenessOnly */ true);
2749     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2750                               UsedAssumedInformation,
2751                               /* CheckBBLivenessOnly */ true);
2752     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2753                                       UsedAssumedInformation);
2754 
2755     // If the returned position of the anchor scope has the noundef attribute, check
2756     // all returned instructions.
2757     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2758       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2759       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2760         auto &RetPosNoUndefAA =
2761             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2762         if (RetPosNoUndefAA.isKnownNoUndef())
2763           A.checkForAllInstructions(InspectReturnInstForUB, *this,
2764                                     {Instruction::Ret}, UsedAssumedInformation,
2765                                     /* CheckBBLivenessOnly */ true);
2766       }
2767     }
2768 
2769     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2770         UBPrevSize != KnownUBInsts.size())
2771       return ChangeStatus::CHANGED;
2772     return ChangeStatus::UNCHANGED;
2773   }
2774 
2775   bool isKnownToCauseUB(Instruction *I) const override {
2776     return KnownUBInsts.count(I);
2777   }
2778 
2779   bool isAssumedToCauseUB(Instruction *I) const override {
2780     // In simple words, if an instruction is not in the set of instructions
2781     // assumed to _not_ cause UB, then it is assumed UB (that includes those
2782     // in the KnownUBInsts set). The rest of the boilerplate
2783     // is to ensure that it is one of the instructions we test
2784     // for UB.
2785 
2786     switch (I->getOpcode()) {
2787     case Instruction::Load:
2788     case Instruction::Store:
2789     case Instruction::AtomicCmpXchg:
2790     case Instruction::AtomicRMW:
2791       return !AssumedNoUBInsts.count(I);
2792     case Instruction::Br: {
2793       auto BrInst = cast<BranchInst>(I);
2794       if (BrInst->isUnconditional())
2795         return false;
2796       return !AssumedNoUBInsts.count(I);
2797     } break;
2798     default:
2799       return false;
2800     }
2801     return false;
2802   }
2803 
2804   ChangeStatus manifest(Attributor &A) override {
2805     if (KnownUBInsts.empty())
2806       return ChangeStatus::UNCHANGED;
2807     for (Instruction *I : KnownUBInsts)
2808       A.changeToUnreachableAfterManifest(I);
2809     return ChangeStatus::CHANGED;
2810   }
2811 
2812   /// See AbstractAttribute::getAsStr()
2813   const std::string getAsStr() const override {
2814     return getAssumed() ? "undefined-behavior" : "no-ub";
2815   }
2816 
2817   /// Note: The correctness of this analysis depends on the fact that the
2818   /// following 2 sets will stop changing after some point.
2819   /// "Change" here means that their size changes.
2820   /// The size of each set is monotonically increasing
2821   /// (we only add items to them) and it is upper bounded by the number of
2822   /// instructions in the processed function (we can never save more
2823   /// elements in either set than this number). Hence, at some point,
2824   /// they will stop increasing.
2825   /// Consequently, at some point, both sets will have stopped
2826   /// changing, effectively making the analysis reach a fixpoint.
2827 
2828   /// Note: These 2 sets are disjoint and an instruction can be considered
2829   /// one of 3 things:
2830   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2831   ///    the KnownUBInsts set.
2832   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2833   ///    has a reason to assume it).
2834   /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2835   ///    could not find a reason to assume or prove that it can cause UB,
2836   ///    hence it assumes it doesn't. We have a set for these instructions
2837   ///    so that we don't reprocess them in every update.
2838   ///    Note however that instructions in this set may cause UB.
2839 
2840 protected:
2841   /// A set of all live instructions _known_ to cause UB.
2842   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2843 
2844 private:
2845   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2846   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2847 
2848   // Should be called during updates when we're processing an instruction
2849   // \p I that depends on a value \p V; one of the following has to happen:
2850   // - If the value is assumed, then stop.
2851   // - If the value is known but undef, then consider it UB.
2852   // - Otherwise, do specific processing with the simplified value.
2853   // We return None in the first 2 cases to signify that an appropriate
2854   // action was taken and the caller should stop.
2855   // Otherwise, we return the simplified value that the caller should
2856   // use for specific processing.
2857   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2858                                          Instruction *I) {
2859     bool UsedAssumedInformation = false;
2860     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2861         IRPosition::value(*V), *this, UsedAssumedInformation);
2862     if (!UsedAssumedInformation) {
2863       // Don't depend on assumed values.
2864       if (!SimplifiedV.hasValue()) {
2865         // If it is known (which we tested above) but it doesn't have a value,
2866         // then we can assume `undef` and hence the instruction is UB.
2867         KnownUBInsts.insert(I);
2868         return llvm::None;
2869       }
2870       if (!SimplifiedV.getValue())
2871         return nullptr;
2872       V = *SimplifiedV;
2873     }
2874     if (isa<UndefValue>(V)) {
2875       KnownUBInsts.insert(I);
2876       return llvm::None;
2877     }
2878     return V;
2879   }
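  // A usage sketch (mirroring the callers above): either the appropriate
  // action was already taken and we stop, or we continue with the returned
  // simplified value.
  //
  //   Optional<Value *> V = stopOnUndefOrAssumed(A, Op, &I);
  //   if (!V.hasValue() || !V.getValue())
  //     return true; // stopped: assumed value or known-undef handled
  //   // ... process *V ...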
2880 };
2881 
2882 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2883   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2884       : AAUndefinedBehaviorImpl(IRP, A) {}
2885 
2886   /// See AbstractAttribute::trackStatistics()
2887   void trackStatistics() const override {
2888     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2889                "Number of instructions known to have UB");
2890     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2891         KnownUBInsts.size();
2892   }
2893 };
2894 
2895 /// ------------------------ Will-Return Attributes ----------------------------
2896 
2897 // Helper function that checks whether a function has any cycle which we don't
2898 // know is bounded.
2899 // Loops with a known maximum trip count are considered bounded; any other cycle is not.
2900 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2901   ScalarEvolution *SE =
2902       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2903   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2904   // If either SCEV or LoopInfo is not available for the function, we assume
2905   // any cycle to be an unbounded cycle.
2906   // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
2907   // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
2908   if (!SE || !LI) {
2909     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2910       if (SCCI.hasCycle())
2911         return true;
2912     return false;
2913   }
2914 
2915   // If there's irreducible control, the function may contain non-loop cycles.
2916   if (mayContainIrreducibleControl(F, LI))
2917     return true;
2918 
2919   // Any loop that does not have a max trip count is considered an unbounded cycle.
2920   for (auto *L : LI->getLoopsInPreorder()) {
2921     if (!SE->getSmallConstantMaxTripCount(L))
2922       return true;
2923   }
2924   return false;
2925 }
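// An illustrative sketch (not from the source) of what this classifies: a
// loop whose max trip count SCEV can bound is fine, while a loop with no
// such bound pessimizes willreturn.
//
//   for (int i = 0; i < 16; ++i)   // bounded, max trip count 16
//     work();
//   while (keepGoing())            // unbounded as far as SCEV can tell
//     work();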
2926 
2927 struct AAWillReturnImpl : public AAWillReturn {
2928   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2929       : AAWillReturn(IRP, A) {}
2930 
2931   /// See AbstractAttribute::initialize(...).
2932   void initialize(Attributor &A) override {
2933     AAWillReturn::initialize(A);
2934 
2935     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2936       indicateOptimisticFixpoint();
2937       return;
2938     }
2939   }
2940 
2941   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2942   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2943     // Check for `mustprogress` in the scope and the associated function which
2944     // might be different if this is a call site.
2945     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2946         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2947       return false;
2948 
2949     bool IsKnown;
2950     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
2951       return IsKnown || !KnownOnly;
2952     return false;
2953   }
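  // An IR sketch of the implication (hypothetical function @h): a function
  // that is mustprogress and readonly cannot make observable progress except
  // by returning, so willreturn is implied.
  //
  //   define i32 @h(i32* %p) mustprogress readonly { ... }
  //   ; deduced: ... mustprogress readonly willreturn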
2954 
2955   /// See AbstractAttribute::updateImpl(...).
2956   ChangeStatus updateImpl(Attributor &A) override {
2957     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2958       return ChangeStatus::UNCHANGED;
2959 
2960     auto CheckForWillReturn = [&](Instruction &I) {
2961       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2962       const auto &WillReturnAA =
2963           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2964       if (WillReturnAA.isKnownWillReturn())
2965         return true;
2966       if (!WillReturnAA.isAssumedWillReturn())
2967         return false;
2968       const auto &NoRecurseAA =
2969           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2970       return NoRecurseAA.isAssumedNoRecurse();
2971     };
2972 
2973     bool UsedAssumedInformation = false;
2974     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2975                                            UsedAssumedInformation))
2976       return indicatePessimisticFixpoint();
2977 
2978     return ChangeStatus::UNCHANGED;
2979   }
2980 
2981   /// See AbstractAttribute::getAsStr()
2982   const std::string getAsStr() const override {
2983     return getAssumed() ? "willreturn" : "may-noreturn";
2984   }
2985 };
2986 
2987 struct AAWillReturnFunction final : AAWillReturnImpl {
2988   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2989       : AAWillReturnImpl(IRP, A) {}
2990 
2991   /// See AbstractAttribute::initialize(...).
2992   void initialize(Attributor &A) override {
2993     AAWillReturnImpl::initialize(A);
2994 
2995     Function *F = getAnchorScope();
2996     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2997       indicatePessimisticFixpoint();
2998   }
2999 
3000   /// See AbstractAttribute::trackStatistics()
3001   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
3002 };
3003 
3004 /// WillReturn attribute deduction for a call site.
3005 struct AAWillReturnCallSite final : AAWillReturnImpl {
3006   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
3007       : AAWillReturnImpl(IRP, A) {}
3008 
3009   /// See AbstractAttribute::initialize(...).
3010   void initialize(Attributor &A) override {
3011     AAWillReturnImpl::initialize(A);
3012     Function *F = getAssociatedFunction();
3013     if (!F || !A.isFunctionIPOAmendable(*F))
3014       indicatePessimisticFixpoint();
3015   }
3016 
3017   /// See AbstractAttribute::updateImpl(...).
3018   ChangeStatus updateImpl(Attributor &A) override {
3019     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
3020       return ChangeStatus::UNCHANGED;
3021 
3022     // TODO: Once we have call site specific value information we can provide
3023     //       call site specific liveness information and then it makes
3024     //       sense to specialize attributes for call site arguments instead of
3025     //       redirecting requests to the callee argument.
3026     Function *F = getAssociatedFunction();
3027     const IRPosition &FnPos = IRPosition::function(*F);
3028     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
3029     return clampStateAndIndicateChange(getState(), FnAA.getState());
3030   }
3031 
3032   /// See AbstractAttribute::trackStatistics()
3033   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
3034 };
3035 
3036 /// -------------------AAReachability Attribute--------------------------
3037 
3038 struct AAReachabilityImpl : AAReachability {
3039   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
3040       : AAReachability(IRP, A) {}
3041 
3042   const std::string getAsStr() const override {
3043     // TODO: Return the number of reachable queries.
3044     return "reachable";
3045   }
3046 
3047   /// See AbstractAttribute::updateImpl(...).
3048   ChangeStatus updateImpl(Attributor &A) override {
3049     const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
3050         *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
3051     if (!NoRecurseAA.isAssumedNoRecurse())
3052       return indicatePessimisticFixpoint();
3053     return ChangeStatus::UNCHANGED;
3054   }
3055 };
3056 
3057 struct AAReachabilityFunction final : public AAReachabilityImpl {
3058   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
3059       : AAReachabilityImpl(IRP, A) {}
3060 
3061   /// See AbstractAttribute::trackStatistics()
3062   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
3063 };
3064 
3065 /// ------------------------ NoAlias Argument Attribute ------------------------
3066 
3067 struct AANoAliasImpl : AANoAlias {
3068   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
3069     assert(getAssociatedType()->isPointerTy() &&
3070            "Noalias is a pointer attribute");
3071   }
3072 
3073   const std::string getAsStr() const override {
3074     return getAssumed() ? "noalias" : "may-alias";
3075   }
3076 };
3077 
3078 /// NoAlias attribute for a floating value.
3079 struct AANoAliasFloating final : AANoAliasImpl {
3080   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3081       : AANoAliasImpl(IRP, A) {}
3082 
3083   /// See AbstractAttribute::initialize(...).
3084   void initialize(Attributor &A) override {
3085     AANoAliasImpl::initialize(A);
3086     Value *Val = &getAssociatedValue();
3087     do {
3088       CastInst *CI = dyn_cast<CastInst>(Val);
3089       if (!CI)
3090         break;
3091       Value *Base = CI->getOperand(0);
3092       if (!Base->hasOneUse())
3093         break;
3094       Val = Base;
3095     } while (true);
3096 
3097     if (!Val->getType()->isPointerTy()) {
3098       indicatePessimisticFixpoint();
3099       return;
3100     }
3101 
3102     if (isa<AllocaInst>(Val))
3103       indicateOptimisticFixpoint();
3104     else if (isa<ConstantPointerNull>(Val) &&
3105              !NullPointerIsDefined(getAnchorScope(),
3106                                    Val->getType()->getPointerAddressSpace()))
3107       indicateOptimisticFixpoint();
3108     else if (Val != &getAssociatedValue()) {
3109       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3110           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3111       if (ValNoAliasAA.isKnownNoAlias())
3112         indicateOptimisticFixpoint();
3113     }
3114   }
3115 
3116   /// See AbstractAttribute::updateImpl(...).
3117   ChangeStatus updateImpl(Attributor &A) override {
3118     // TODO: Implement this.
3119     return indicatePessimisticFixpoint();
3120   }
3121 
3122   /// See AbstractAttribute::trackStatistics()
3123   void trackStatistics() const override {
3124     STATS_DECLTRACK_FLOATING_ATTR(noalias)
3125   }
3126 };
3127 
3128 /// NoAlias attribute for an argument.
3129 struct AANoAliasArgument final
3130     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3131   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3132   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3133 
3134   /// See AbstractAttribute::initialize(...).
3135   void initialize(Attributor &A) override {
3136     Base::initialize(A);
3137     // See callsite argument attribute and callee argument attribute.
3138     if (hasAttr({Attribute::ByVal}))
3139       indicateOptimisticFixpoint();
3140   }
3141 
3142   /// See AbstractAttribute::update(...).
3143   ChangeStatus updateImpl(Attributor &A) override {
3144     // We have to make sure no-alias on the argument does not break
3145     // synchronization when this is a callback argument, see also [1] below.
3146     // If synchronization cannot be affected, we delegate to the base updateImpl
3147     // function, otherwise we give up for now.
3148 
3149     // If the function is no-sync, no-alias cannot break synchronization.
3150     const auto &NoSyncAA =
3151         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3152                              DepClassTy::OPTIONAL);
3153     if (NoSyncAA.isAssumedNoSync())
3154       return Base::updateImpl(A);
3155 
3156     // If the argument is read-only, no-alias cannot break synchronization.
3157     bool IsKnown;
3158     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3159       return Base::updateImpl(A);
3160 
3161     // If the argument is never passed through callbacks, no-alias cannot break
3162     // synchronization.
3163     bool UsedAssumedInformation = false;
3164     if (A.checkForAllCallSites(
3165             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3166             true, UsedAssumedInformation))
3167       return Base::updateImpl(A);
3168 
3169     // TODO: add no-alias but make sure it doesn't break synchronization by
3170     // introducing fake uses. See:
3171     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3172     //     International Workshop on OpenMP 2018,
3173     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3174 
3175     return indicatePessimisticFixpoint();
3176   }
3177 
3178   /// See AbstractAttribute::trackStatistics()
3179   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3180 };
3181 
3182 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3183   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3184       : AANoAliasImpl(IRP, A) {}
3185 
3186   /// See AbstractAttribute::initialize(...).
3187   void initialize(Attributor &A) override {
3188     // See callsite argument attribute and callee argument attribute.
3189     const auto &CB = cast<CallBase>(getAnchorValue());
3190     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3191       indicateOptimisticFixpoint();
3192     Value &Val = getAssociatedValue();
3193     if (isa<ConstantPointerNull>(Val) &&
3194         !NullPointerIsDefined(getAnchorScope(),
3195                               Val.getType()->getPointerAddressSpace()))
3196       indicateOptimisticFixpoint();
3197   }
3198 
3199   /// Determine if the underlying value may alias with the call site argument
3200   /// \p OtherArgNo of \p ICS (= the underlying call site).
3201   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3202                             const AAMemoryBehavior &MemBehaviorAA,
3203                             const CallBase &CB, unsigned OtherArgNo) {
3204     // We do not need to worry about aliasing with the underlying IRP.
3205     if (this->getCalleeArgNo() == (int)OtherArgNo)
3206       return false;
3207 
3208     // If it is not a pointer or pointer vector we do not alias.
3209     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3210     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3211       return false;
3212 
3213     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3214         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3215 
3216     // If the argument is readnone, there is no read-write aliasing.
3217     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3218       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3219       return false;
3220     }
3221 
3222     // If the argument is readonly and the underlying value is readonly, there
3223     // is no read-write aliasing.
3224     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3225     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3226       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3227       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3228       return false;
3229     }
3230 
3231     // We have to utilize actual alias analysis queries so we need the object.
3232     if (!AAR)
3233       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3234 
3235     // Try to rule it out at the call site.
3236     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3237     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3238                          "callsite arguments: "
3239                       << getAssociatedValue() << " " << *ArgOp << " => "
3240                       << (IsAliasing ? "" : "no-") << "alias \n");
3241 
3242     return IsAliasing;
3243   }
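  // A sketch of the query this answers (hypothetical callee @use2): for %p
  // to be noalias at the callsite, it must not read/write-alias any other
  // pointer argument, such as %q, at the same call.
  //
  //   call void @use2(i32* %p, i32* %q)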
3244 
3245   bool
3246   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3247                                          const AAMemoryBehavior &MemBehaviorAA,
3248                                          const AANoAlias &NoAliasAA) {
3249     // We can deduce "noalias" if the following conditions hold.
3250     // (i)   Associated value is assumed to be noalias in the definition.
3251     // (ii)  Associated value is assumed to be no-capture in all the uses
3252     //       possibly executed before this callsite.
3253     // (iii) There is no other pointer argument which could alias with the
3254     //       value.
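    // An illustrative IR sketch (hypothetical callee @take) where all three
    // conditions hold for the callsite argument:
    //
    //   %m = call noalias i8* @malloc(i64 8)  ; (i) noalias at the definition
    //   call void @take(i8* %m)               ; (ii) not captured before here
    //                                         ; (iii) only pointer argument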
3255 
3256     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3257     if (!AssociatedValueIsNoAliasAtDef) {
3258       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3259                         << " is not no-alias at the definition\n");
3260       return false;
3261     }
3262 
3263     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3264 
3265     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3266     const Function *ScopeFn = VIRP.getAnchorScope();
3267     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3268     // Check whether the value is captured in the scope using AANoCapture.
3269     // Look at the CFG and check only uses possibly executed before this
3270     // callsite.
3271     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3272       Instruction *UserI = cast<Instruction>(U.getUser());
3273 
3274       // If UserI is the current instruction and there is a single potential
3275       // use of the value in UserI, we allow the use.
3276       // TODO: We should inspect the operands and allow those that cannot alias
3277       //       with the value.
3278       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3279         return true;
3280 
3281       if (ScopeFn) {
3282         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
3283             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
3284 
3285         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
3286           return true;
3287 
3288         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3289           if (CB->isArgOperand(&U)) {
3290 
3291             unsigned ArgNo = CB->getArgOperandNo(&U);
3292 
3293             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3294                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3295                 DepClassTy::OPTIONAL);
3296 
3297             if (NoCaptureAA.isAssumedNoCapture())
3298               return true;
3299           }
3300         }
3301       }
3302 
3303       // For cases that can potentially have more users.
3304       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3305           isa<SelectInst>(U)) {
3306         Follow = true;
3307         return true;
3308       }
3309 
3310       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3311       return false;
3312     };
3313 
3314     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3315       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3316         LLVM_DEBUG(
3317             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3318                    << " cannot be noalias as it is potentially captured\n");
3319         return false;
3320       }
3321     }
3322     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3323 
3324     // Check there is no other pointer argument which could alias with the
3325     // value passed at this call site.
3326     // TODO: AbstractCallSite
3327     const auto &CB = cast<CallBase>(getAnchorValue());
3328     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3329       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3330         return false;
3331 
3332     return true;
3333   }
3334 
3335   /// See AbstractAttribute::updateImpl(...).
3336   ChangeStatus updateImpl(Attributor &A) override {
3337     // If the argument is readnone we are done as there are no accesses via the
3338     // argument.
3339     auto &MemBehaviorAA =
3340         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3341     if (MemBehaviorAA.isAssumedReadNone()) {
3342       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3343       return ChangeStatus::UNCHANGED;
3344     }
3345 
3346     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3347     const auto &NoAliasAA =
3348         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3349 
3350     AAResults *AAR = nullptr;
3351     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3352                                                NoAliasAA)) {
3353       LLVM_DEBUG(
3354           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3355       return ChangeStatus::UNCHANGED;
3356     }
3357 
3358     return indicatePessimisticFixpoint();
3359   }
3360 
3361   /// See AbstractAttribute::trackStatistics()
3362   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3363 };
3364 
3365 /// NoAlias attribute for function return value.
3366 struct AANoAliasReturned final : AANoAliasImpl {
3367   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3368       : AANoAliasImpl(IRP, A) {}
3369 
3370   /// See AbstractAttribute::initialize(...).
3371   void initialize(Attributor &A) override {
3372     AANoAliasImpl::initialize(A);
3373     Function *F = getAssociatedFunction();
3374     if (!F || F->isDeclaration())
3375       indicatePessimisticFixpoint();
3376   }
3377 
3378   /// See AbstractAttribute::updateImpl(...).
3379   virtual ChangeStatus updateImpl(Attributor &A) override {
3380 
3381     auto CheckReturnValue = [&](Value &RV) -> bool {
3382       if (Constant *C = dyn_cast<Constant>(&RV))
3383         if (C->isNullValue() || isa<UndefValue>(C))
3384           return true;
3385 
3386       /// For now, we can only deduce noalias if we have call sites.
3387       /// FIXME: add more support.
3388       if (!isa<CallBase>(&RV))
3389         return false;
3390 
3391       const IRPosition &RVPos = IRPosition::value(RV);
3392       const auto &NoAliasAA =
3393           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3394       if (!NoAliasAA.isAssumedNoAlias())
3395         return false;
3396 
3397       const auto &NoCaptureAA =
3398           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3399       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3400     };
3401 
3402     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3403       return indicatePessimisticFixpoint();
3404 
3405     return ChangeStatus::UNCHANGED;
3406   }
3407 
3408   /// See AbstractAttribute::trackStatistics()
3409   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3410 };
3411 
3412 /// NoAlias attribute deduction for a call site return value.
3413 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3414   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3415       : AANoAliasImpl(IRP, A) {}
3416 
3417   /// See AbstractAttribute::initialize(...).
3418   void initialize(Attributor &A) override {
3419     AANoAliasImpl::initialize(A);
3420     Function *F = getAssociatedFunction();
3421     if (!F || F->isDeclaration())
3422       indicatePessimisticFixpoint();
3423   }
3424 
3425   /// See AbstractAttribute::updateImpl(...).
3426   ChangeStatus updateImpl(Attributor &A) override {
3427     // TODO: Once we have call site specific value information we can provide
3428     //       call site specific liveness information and then it makes
3429     //       sense to specialize attributes for call site arguments instead of
3430     //       redirecting requests to the callee argument.
3431     Function *F = getAssociatedFunction();
3432     const IRPosition &FnPos = IRPosition::returned(*F);
3433     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3434     return clampStateAndIndicateChange(getState(), FnAA.getState());
3435   }
3436 
3437   /// See AbstractAttribute::trackStatistics()
3438   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3439 };
3440 
3441 /// -------------------AAIsDead Function Attribute-----------------------
3442 
3443 struct AAIsDeadValueImpl : public AAIsDead {
3444   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3445 
3446   /// See AAIsDead::isAssumedDead().
3447   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3448 
3449   /// See AAIsDead::isKnownDead().
3450   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3451 
3452   /// See AAIsDead::isAssumedDead(BasicBlock *).
3453   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3454 
3455   /// See AAIsDead::isKnownDead(BasicBlock *).
3456   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3457 
3458   /// See AAIsDead::isAssumedDead(Instruction *I).
3459   bool isAssumedDead(const Instruction *I) const override {
3460     return I == getCtxI() && isAssumedDead();
3461   }
3462 
3463   /// See AAIsDead::isKnownDead(Instruction *I).
3464   bool isKnownDead(const Instruction *I) const override {
3465     return isAssumedDead(I) && isKnownDead();
3466   }
3467 
3468   /// See AbstractAttribute::getAsStr().
3469   const std::string getAsStr() const override {
3470     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3471   }
3472 
3473   /// Check if all uses are assumed dead.
3474   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3475     // Callers might not check the type, void has no uses.
3476     if (V.getType()->isVoidTy())
3477       return true;
3478 
3479     // If we replace a value with a constant there are no uses left afterwards.
3480     if (!isa<Constant>(V)) {
3481       bool UsedAssumedInformation = false;
3482       Optional<Constant *> C =
3483           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3484       if (!C.hasValue() || *C)
3485         return true;
3486     }
3487 
3488     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3489     // Explicitly set the dependence class to required because we want a long
3490     // chain of N dependent instructions to be considered live as soon as one is
3491     // without going through N update cycles. This is not required for
3492     // correctness.
3493     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3494                              DepClassTy::REQUIRED);
3495   }
3496 
3497   /// Determine if \p I is assumed to be side-effect free.
3498   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3499     if (!I || wouldInstructionBeTriviallyDead(I))
3500       return true;
3501 
3502     auto *CB = dyn_cast<CallBase>(I);
3503     if (!CB || isa<IntrinsicInst>(CB))
3504       return false;
3505 
3506     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3507     const auto &NoUnwindAA =
3508         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3509     if (!NoUnwindAA.isAssumedNoUnwind())
3510       return false;
3511     if (!NoUnwindAA.isKnownNoUnwind())
3512       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3513 
3514     bool IsKnown;
3515     return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3516   }
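  // A sketch of what qualifies (hypothetical callee @pure): a call that is
  // assumed nounwind and readonly counts as side-effect free; intrinsics and
  // non-call instructions that are not trivially dead do not.
  //
  //   %r = call i32 @pure(i32 %x)   ; side-effect free if nounwind + readonly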
3517 };
3518 
3519 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3520   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3521       : AAIsDeadValueImpl(IRP, A) {}
3522 
3523   /// See AbstractAttribute::initialize(...).
3524   void initialize(Attributor &A) override {
3525     if (isa<UndefValue>(getAssociatedValue())) {
3526       indicatePessimisticFixpoint();
3527       return;
3528     }
3529 
3530     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3531     if (!isAssumedSideEffectFree(A, I)) {
3532       if (!isa_and_nonnull<StoreInst>(I))
3533         indicatePessimisticFixpoint();
3534       else
3535         removeAssumedBits(HAS_NO_EFFECT);
3536     }
3537   }
3538 
3539   bool isDeadStore(Attributor &A, StoreInst &SI) {
3540     // The LangRef now states that a volatile store is not UB/dead, so skip them.
3541     if (SI.isVolatile())
3542       return false;
3543 
3544     bool UsedAssumedInformation = false;
3545     SmallSetVector<Value *, 4> PotentialCopies;
3546     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3547                                              UsedAssumedInformation))
3548       return false;
3549     return llvm::all_of(PotentialCopies, [&](Value *V) {
3550       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3551                              UsedAssumedInformation);
3552     });
3553   }
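  // A sketch of a store this can prove dead (illustrative): the only
  // potential copy of the stored value is %v, and %v itself is assumed dead.
  //
  //   %a = alloca i32
  //   store i32 42, i32* %a    ; dead if all potential copies are dead
  //   %v = load i32, i32* %a   ; ... and %v has no live uses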
3554 
3555   /// See AbstractAttribute::updateImpl(...).
3556   ChangeStatus updateImpl(Attributor &A) override {
3557     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3558     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3559       if (!isDeadStore(A, *SI))
3560         return indicatePessimisticFixpoint();
3561     } else {
3562       if (!isAssumedSideEffectFree(A, I))
3563         return indicatePessimisticFixpoint();
3564       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3565         return indicatePessimisticFixpoint();
3566     }
3567     return ChangeStatus::UNCHANGED;
3568   }
3569 
3570   /// See AbstractAttribute::manifest(...).
3571   ChangeStatus manifest(Attributor &A) override {
3572     Value &V = getAssociatedValue();
3573     if (auto *I = dyn_cast<Instruction>(&V)) {
3574       // If we get here we basically know the users are all dead. We check
3575       // whether isAssumedSideEffectFree returns true again because it might
3576       // not be the case: only the users may be dead while the instruction
3577       // (= the call) is still needed.
3578       if (isa<StoreInst>(I) ||
3579           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3580         A.deleteAfterManifest(*I);
3581         return ChangeStatus::CHANGED;
3582       }
3583     }
3584     if (V.use_empty())
3585       return ChangeStatus::UNCHANGED;
3586 
3587     bool UsedAssumedInformation = false;
3588     Optional<Constant *> C =
3589         A.getAssumedConstant(V, *this, UsedAssumedInformation);
3590     if (C.hasValue() && C.getValue())
3591       return ChangeStatus::UNCHANGED;
3592 
3593     // Replace the value with undef as it is dead but keep droppable uses around
3594     // as they provide information we don't want to give up on just yet.
3595     UndefValue &UV = *UndefValue::get(V.getType());
3596     bool AnyChange =
3597         A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
3598     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3599   }
3600 
3601   /// See AbstractAttribute::trackStatistics()
3602   void trackStatistics() const override {
3603     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3604   }
3605 };
3606 
3607 struct AAIsDeadArgument : public AAIsDeadFloating {
3608   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3609       : AAIsDeadFloating(IRP, A) {}
3610 
3611   /// See AbstractAttribute::initialize(...).
3612   void initialize(Attributor &A) override {
3613     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3614       indicatePessimisticFixpoint();
3615   }
3616 
3617   /// See AbstractAttribute::manifest(...).
3618   ChangeStatus manifest(Attributor &A) override {
3619     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3620     Argument &Arg = *getAssociatedArgument();
3621     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3622       if (A.registerFunctionSignatureRewrite(
3623               Arg, /* ReplacementTypes */ {},
3624               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3625               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3626         Arg.dropDroppableUses();
3627         return ChangeStatus::CHANGED;
3628       }
3629     return Changed;
3630   }
3631 
3632   /// See AbstractAttribute::trackStatistics()
3633   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3634 };
3635 
3636 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3637   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3638       : AAIsDeadValueImpl(IRP, A) {}
3639 
3640   /// See AbstractAttribute::initialize(...).
3641   void initialize(Attributor &A) override {
3642     if (isa<UndefValue>(getAssociatedValue()))
3643       indicatePessimisticFixpoint();
3644   }
3645 
3646   /// See AbstractAttribute::updateImpl(...).
3647   ChangeStatus updateImpl(Attributor &A) override {
3648     // TODO: Once we have call site specific value information we can provide
3649     //       call site specific liveness information and then it makes
3650     //       sense to specialize attributes for call site arguments instead of
3651     //       redirecting requests to the callee argument.
3652     Argument *Arg = getAssociatedArgument();
3653     if (!Arg)
3654       return indicatePessimisticFixpoint();
3655     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3656     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3657     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3658   }
3659 
3660   /// See AbstractAttribute::manifest(...).
3661   ChangeStatus manifest(Attributor &A) override {
3662     CallBase &CB = cast<CallBase>(getAnchorValue());
3663     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3664     assert(!isa<UndefValue>(U.get()) &&
3665            "Expected undef values to be filtered out!");
3666     UndefValue &UV = *UndefValue::get(U->getType());
3667     if (A.changeUseAfterManifest(U, UV))
3668       return ChangeStatus::CHANGED;
3669     return ChangeStatus::UNCHANGED;
3670   }
3671 
3672   /// See AbstractAttribute::trackStatistics()
3673   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3674 };
3675 
3676 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3677   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3678       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3679 
3680   /// See AAIsDead::isAssumedDead().
3681   bool isAssumedDead() const override {
3682     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3683   }
3684 
3685   /// See AbstractAttribute::initialize(...).
3686   void initialize(Attributor &A) override {
3687     if (isa<UndefValue>(getAssociatedValue())) {
3688       indicatePessimisticFixpoint();
3689       return;
3690     }
3691 
3692     // We track this separately as a secondary state.
3693     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3694   }
3695 
3696   /// See AbstractAttribute::updateImpl(...).
3697   ChangeStatus updateImpl(Attributor &A) override {
3698     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3699     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3700       IsAssumedSideEffectFree = false;
3701       Changed = ChangeStatus::CHANGED;
3702     }
3703     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3704       return indicatePessimisticFixpoint();
3705     return Changed;
3706   }
3707 
3708   /// See AbstractAttribute::trackStatistics()
3709   void trackStatistics() const override {
3710     if (IsAssumedSideEffectFree)
3711       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3712     else
3713       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3714   }
3715 
3716   /// See AbstractAttribute::getAsStr().
3717   const std::string getAsStr() const override {
3718     return isAssumedDead()
3719                ? "assumed-dead"
3720                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3721   }
3722 
3723 private:
3724   bool IsAssumedSideEffectFree;
3725 };
3726 
3727 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3728   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3729       : AAIsDeadValueImpl(IRP, A) {}
3730 
3731   /// See AbstractAttribute::updateImpl(...).
3732   ChangeStatus updateImpl(Attributor &A) override {
3733 
3734     bool UsedAssumedInformation = false;
3735     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3736                               {Instruction::Ret}, UsedAssumedInformation);
3737 
3738     auto PredForCallSite = [&](AbstractCallSite ACS) {
3739       if (ACS.isCallbackCall() || !ACS.getInstruction())
3740         return false;
3741       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3742     };
3743 
3744     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3745                                 UsedAssumedInformation))
3746       return indicatePessimisticFixpoint();
3747 
3748     return ChangeStatus::UNCHANGED;
3749   }
3750 
3751   /// See AbstractAttribute::manifest(...).
3752   ChangeStatus manifest(Attributor &A) override {
3753     // TODO: Rewrite the signature to return void?
3754     bool AnyChange = false;
3755     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3756     auto RetInstPred = [&](Instruction &I) {
3757       ReturnInst &RI = cast<ReturnInst>(I);
3758       if (!isa<UndefValue>(RI.getReturnValue()))
3759         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3760       return true;
3761     };
3762     bool UsedAssumedInformation = false;
3763     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3764                               UsedAssumedInformation);
3765     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3766   }
3767 
3768   /// See AbstractAttribute::trackStatistics()
3769   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3770 };
3771 
3772 struct AAIsDeadFunction : public AAIsDead {
3773   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3774 
3775   /// See AbstractAttribute::initialize(...).
3776   void initialize(Attributor &A) override {
3777     const Function *F = getAnchorScope();
3778     if (F && !F->isDeclaration()) {
3779       // We only want to compute liveness once. If the function is not part of
3780       // the SCC, skip it.
3781       if (A.isRunOn(*const_cast<Function *>(F))) {
3782         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3783         assumeLive(A, F->getEntryBlock());
3784       } else {
3785         indicatePessimisticFixpoint();
3786       }
3787     }
3788   }
3789 
3790   /// See AbstractAttribute::getAsStr().
3791   const std::string getAsStr() const override {
3792     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3793            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3794            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3795            std::to_string(KnownDeadEnds.size()) + "]";
3796   }
3797 
3798   /// See AbstractAttribute::manifest(...).
3799   ChangeStatus manifest(Attributor &A) override {
3800     assert(getState().isValidState() &&
3801            "Attempted to manifest an invalid state!");
3802 
3803     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3804     Function &F = *getAnchorScope();
3805 
3806     if (AssumedLiveBlocks.empty()) {
3807       A.deleteAfterManifest(F);
3808       return ChangeStatus::CHANGED;
3809     }
3810 
3811     // Flag to determine if we can change an invoke to a call assuming the
3812     // callee is nounwind. This is not possible if the personality of the
3813     // function allows catching asynchronous exceptions.
3814     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3815 
3816     KnownDeadEnds.set_union(ToBeExploredFrom);
3817     for (const Instruction *DeadEndI : KnownDeadEnds) {
3818       auto *CB = dyn_cast<CallBase>(DeadEndI);
3819       if (!CB)
3820         continue;
3821       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3822           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3823       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3824       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3825         continue;
3826 
3827       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3828         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3829       else
3830         A.changeToUnreachableAfterManifest(
3831             const_cast<Instruction *>(DeadEndI->getNextNode()));
3832       HasChanged = ChangeStatus::CHANGED;
3833     }
3834 
3835     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3836     for (BasicBlock &BB : F)
3837       if (!AssumedLiveBlocks.count(&BB)) {
3838         A.deleteAfterManifest(BB);
3839         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3840         HasChanged = ChangeStatus::CHANGED;
3841       }
3842 
3843     return HasChanged;
3844   }
3845 
3846   /// See AbstractAttribute::updateImpl(...).
3847   ChangeStatus updateImpl(Attributor &A) override;
3848 
3849   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3850     assert(From->getParent() == getAnchorScope() &&
3851            To->getParent() == getAnchorScope() &&
3852            "Used AAIsDead of the wrong function");
3853     return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3854   }
3855 
3856   /// See AbstractAttribute::trackStatistics()
3857   void trackStatistics() const override {}
3858 
3859   /// Returns true if the function is assumed dead.
3860   bool isAssumedDead() const override { return false; }
3861 
3862   /// See AAIsDead::isKnownDead().
3863   bool isKnownDead() const override { return false; }
3864 
3865   /// See AAIsDead::isAssumedDead(BasicBlock *).
3866   bool isAssumedDead(const BasicBlock *BB) const override {
3867     assert(BB->getParent() == getAnchorScope() &&
3868            "BB must be in the same anchor scope function.");
3869 
3870     if (!getAssumed())
3871       return false;
3872     return !AssumedLiveBlocks.count(BB);
3873   }
3874 
3875   /// See AAIsDead::isKnownDead(BasicBlock *).
3876   bool isKnownDead(const BasicBlock *BB) const override {
3877     return getKnown() && isAssumedDead(BB);
3878   }
3879 
3880   /// See AAIsDead::isAssumed(Instruction *I).
3881   bool isAssumedDead(const Instruction *I) const override {
3882     assert(I->getParent()->getParent() == getAnchorScope() &&
3883            "Instruction must be in the same anchor scope function.");
3884 
3885     if (!getAssumed())
3886       return false;
3887 
3888     // If it is not in AssumedLiveBlocks then it is for sure dead.
3889     // Otherwise, it can still be after a noreturn call in a live block.
3890     if (!AssumedLiveBlocks.count(I->getParent()))
3891       return true;
3892 
3893     // If it is not after a liveness barrier it is live.
3894     const Instruction *PrevI = I->getPrevNode();
3895     while (PrevI) {
3896       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3897         return true;
3898       PrevI = PrevI->getPrevNode();
3899     }
3900     return false;
3901   }
3902 
3903   /// See AAIsDead::isKnownDead(Instruction *I).
3904   bool isKnownDead(const Instruction *I) const override {
3905     return getKnown() && isAssumedDead(I);
3906   }
3907 
3908   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3909   /// that internal functions called from \p BB should now be looked at.
3910   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3911     if (!AssumedLiveBlocks.insert(&BB).second)
3912       return false;
3913 
3914     // We assume that all of BB is (probably) live now and if there are calls to
3915     // internal functions we will assume that those are now live as well. This
3916     // is a performance optimization for blocks with calls to a lot of internal
3917     // functions. It can however cause dead functions to be treated as live.
3918     for (const Instruction &I : BB)
3919       if (const auto *CB = dyn_cast<CallBase>(&I))
3920         if (const Function *F = CB->getCalledFunction())
3921           if (F->hasLocalLinkage())
3922             A.markLiveInternalFunction(*F);
3923     return true;
3924   }
3925 
3926   /// Collection of instructions that need to be explored again, e.g., we
3927   /// assumed they do not transfer control to (one of their) successors.
3928   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3929 
3930   /// Collection of instructions that are known to not transfer control.
3931   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3932 
3933   /// Collection of all assumed live edges
3934   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3935 
3936   /// Collection of all assumed live BasicBlocks.
3937   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3938 };
3939 
3940 static bool
3941 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3942                         AbstractAttribute &AA,
3943                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3944   const IRPosition &IPos = IRPosition::callsite_function(CB);
3945 
3946   const auto &NoReturnAA =
3947       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3948   if (NoReturnAA.isAssumedNoReturn())
3949     return !NoReturnAA.isKnownNoReturn();
3950   if (CB.isTerminator())
3951     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3952   else
3953     AliveSuccessors.push_back(CB.getNextNode());
3954   return false;
3955 }
3956 
3957 static bool
3958 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3959                         AbstractAttribute &AA,
3960                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3961   bool UsedAssumedInformation =
3962       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3963 
3964   // First, determine if we can change an invoke to a call assuming the
3965   // callee is nounwind. This is not possible if the personality of the
3966   // function allows catching asynchronous exceptions.
3967   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3968     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3969   } else {
3970     const IRPosition &IPos = IRPosition::callsite_function(II);
3971     const auto &AANoUnw =
3972         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3973     if (AANoUnw.isAssumedNoUnwind()) {
3974       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3975     } else {
3976       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3977     }
3978   }
3979   return UsedAssumedInformation;
3980 }
3981 
3982 static bool
3983 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3984                         AbstractAttribute &AA,
3985                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3986   bool UsedAssumedInformation = false;
3987   if (BI.getNumSuccessors() == 1) {
3988     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3989   } else {
3990     Optional<Constant *> C =
3991         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3992     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3993       // No value yet, assume both edges are dead.
3994     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3995       const BasicBlock *SuccBB =
3996           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3997       AliveSuccessors.push_back(&SuccBB->front());
3998     } else {
3999       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
4000       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
4001       UsedAssumedInformation = false;
4002     }
4003   }
4004   return UsedAssumedInformation;
4005 }
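
// Illustrative example (not from this file): for
//   br i1 %c, label %bb0, label %bb1
// with %c simplified to `i1 true`, only the first instruction of %bb0 is
// alive; while %c is unknown or `undef`, both edges remain assumed dead.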
4006 
4007 static bool
4008 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
4009                         AbstractAttribute &AA,
4010                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
4011   bool UsedAssumedInformation = false;
4012   Optional<Constant *> C =
4013       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
4014   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
4015     // No value yet, assume all edges are dead.
4016   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
4017     for (auto &CaseIt : SI.cases()) {
4018       if (CaseIt.getCaseValue() == C.getValue()) {
4019         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
4020         return UsedAssumedInformation;
4021       }
4022     }
4023     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
4024     return UsedAssumedInformation;
4025   } else {
4026     for (const BasicBlock *SuccBB : successors(SI.getParent()))
4027       AliveSuccessors.push_back(&SuccBB->front());
4028   }
4029   return UsedAssumedInformation;
4030 }
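
// Illustrative example (not from this file): for
//   switch i32 %x, label %default [ i32 0, label %bb0
//                                   i32 1, label %bb1 ]
// with %x simplified to `i32 1`, only %bb1 is alive; if %x simplifies to a
// constant matching no case, the default destination is the only alive
// successor.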
4031 
4032 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
4033   ChangeStatus Change = ChangeStatus::UNCHANGED;
4034 
4035   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
4036                     << getAnchorScope()->size() << "] BBs and "
4037                     << ToBeExploredFrom.size() << " exploration points and "
4038                     << KnownDeadEnds.size() << " known dead ends\n");
4039 
4040   // Copy and clear the list of instructions we need to explore from. It is
4041   // refilled with instructions the next update has to look at.
4042   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4043                                                ToBeExploredFrom.end());
4044   decltype(ToBeExploredFrom) NewToBeExploredFrom;
4045 
4046   SmallVector<const Instruction *, 8> AliveSuccessors;
4047   while (!Worklist.empty()) {
4048     const Instruction *I = Worklist.pop_back_val();
4049     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4050 
4051     // Fast forward over uninteresting instructions. We could look for UB
4052     // here, though.
4053     while (!I->isTerminator() && !isa<CallBase>(I))
4054       I = I->getNextNode();
4055 
4056     AliveSuccessors.clear();
4057 
4058     bool UsedAssumedInformation = false;
4059     switch (I->getOpcode()) {
4060     // TODO: look for (assumed) UB to backwards propagate "deadness".
4061     default:
4062       assert(I->isTerminator() &&
4063              "Expected non-terminators to be handled already!");
4064       for (const BasicBlock *SuccBB : successors(I->getParent()))
4065         AliveSuccessors.push_back(&SuccBB->front());
4066       break;
4067     case Instruction::Call:
4068       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4069                                                        *this, AliveSuccessors);
4070       break;
4071     case Instruction::Invoke:
4072       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4073                                                        *this, AliveSuccessors);
4074       break;
4075     case Instruction::Br:
4076       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4077                                                        *this, AliveSuccessors);
4078       break;
4079     case Instruction::Switch:
4080       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4081                                                        *this, AliveSuccessors);
4082       break;
4083     }
4084 
4085     if (UsedAssumedInformation) {
4086       NewToBeExploredFrom.insert(I);
4087     } else if (AliveSuccessors.empty() ||
4088                (I->isTerminator() &&
4089                 AliveSuccessors.size() < I->getNumSuccessors())) {
4090       if (KnownDeadEnds.insert(I))
4091         Change = ChangeStatus::CHANGED;
4092     }
4093 
4094     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4095                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4096                       << UsedAssumedInformation << "\n");
4097 
4098     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4099       if (!I->isTerminator()) {
4100         assert(AliveSuccessors.size() == 1 &&
4101                "Non-terminator expected to have a single successor!");
4102         Worklist.push_back(AliveSuccessor);
4103       } else {
4104         // Record the assumed live edge.
4105         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4106         if (AssumedLiveEdges.insert(Edge).second)
4107           Change = ChangeStatus::CHANGED;
4108         if (assumeLive(A, *AliveSuccessor->getParent()))
4109           Worklist.push_back(AliveSuccessor);
4110       }
4111     }
4112   }
4113 
4114   // Check if the content of ToBeExploredFrom changed, ignore the order.
4115   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4116       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4117         return !ToBeExploredFrom.count(I);
4118       })) {
4119     Change = ChangeStatus::CHANGED;
4120     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4121   }
4122 
4123   // If we know everything is live there is no need to query for liveness.
4124   // Instead, indicating a pessimistic fixpoint will cause the state to be
4125   // "invalid" and all queries to be answered conservatively without lookups.
4126   // To be in this state we have to (1) have finished the exploration, (2) not
4127   // have discovered any non-trivial dead end, and (3) not have ruled
4128   // unreachable code dead.
4129   if (ToBeExploredFrom.empty() &&
4130       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4131       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4132         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4133       }))
4134     return indicatePessimisticFixpoint();
4135   return Change;
4136 }
4137 
4138 /// Liveness information for a call site.
4139 struct AAIsDeadCallSite final : AAIsDeadFunction {
4140   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4141       : AAIsDeadFunction(IRP, A) {}
4142 
4143   /// See AbstractAttribute::initialize(...).
4144   void initialize(Attributor &A) override {
4145     // TODO: Once we have call site specific value information we can provide
4146     //       call site specific liveness information and then it makes
4147     //       sense to specialize attributes for call sites instead of
4148     //       redirecting requests to the callee.
4149     llvm_unreachable("Abstract attributes for liveness are not "
4150                      "supported for call sites yet!");
4151   }
4152 
4153   /// See AbstractAttribute::updateImpl(...).
4154   ChangeStatus updateImpl(Attributor &A) override {
4155     return indicatePessimisticFixpoint();
4156   }
4157 
4158   /// See AbstractAttribute::trackStatistics()
4159   void trackStatistics() const override {}
4160 };
4161 
4162 /// -------------------- Dereferenceable Argument Attribute --------------------
4163 
4164 struct AADereferenceableImpl : AADereferenceable {
4165   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4166       : AADereferenceable(IRP, A) {}
4167   using StateType = DerefState;
4168 
4169   /// See AbstractAttribute::initialize(...).
4170   void initialize(Attributor &A) override {
4171     SmallVector<Attribute, 4> Attrs;
4172     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4173              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4174     for (const Attribute &Attr : Attrs)
4175       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4176 
4177     const IRPosition &IRP = this->getIRPosition();
4178     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4179 
4180     bool CanBeNull, CanBeFreed;
4181     takeKnownDerefBytesMaximum(
4182         IRP.getAssociatedValue().getPointerDereferenceableBytes(
4183             A.getDataLayout(), CanBeNull, CanBeFreed));
4184 
4185     bool IsFnInterface = IRP.isFnInterfaceKind();
4186     Function *FnScope = IRP.getAnchorScope();
4187     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4188       indicatePessimisticFixpoint();
4189       return;
4190     }
4191 
4192     if (Instruction *CtxI = getCtxI())
4193       followUsesInMBEC(*this, A, getState(), *CtxI);
4194   }
4195 
4196   /// See AbstractAttribute::getState()
4197   /// {
4198   StateType &getState() override { return *this; }
4199   const StateType &getState() const override { return *this; }
4200   /// }
4201 
4202   /// Helper function for collecting accessed bytes in must-be-executed-context
4203   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4204                               DerefState &State) {
4205     const Value *UseV = U->get();
4206     if (!UseV->getType()->isPointerTy())
4207       return;
4208 
4209     Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4210     if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4211       return;
4212 
4213     int64_t Offset;
4214     const Value *Base = GetPointerBaseWithConstantOffset(
4215         Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4216     if (Base && Base == &getAssociatedValue())
4217       State.addAccessedBytes(Offset, Loc->Size.getValue());
4218   }
4219 
4220   /// See followUsesInMBEC
4221   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4222                        AADereferenceable::StateType &State) {
4223     bool IsNonNull = false;
4224     bool TrackUse = false;
4225     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4226         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4227     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4228                       << " for instruction " << *I << "\n");
4229 
4230     addAccessedBytesForUse(A, U, I, State);
4231     State.takeKnownDerefBytesMaximum(DerefBytes);
4232     return TrackUse;
4233   }
4234 
4235   /// See AbstractAttribute::manifest(...).
4236   ChangeStatus manifest(Attributor &A) override {
4237     ChangeStatus Change = AADereferenceable::manifest(A);
4238     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4239       removeAttrs({Attribute::DereferenceableOrNull});
4240       return ChangeStatus::CHANGED;
4241     }
4242     return Change;
4243   }
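
  // Illustrative example (not from this file): a pointer known to be
  // `dereferenceable_or_null(8)` that is also assumed `nonnull` manifests
  // as plain `dereferenceable(8)`; the weaker `_or_null` variant is
  // removed above.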
4244 
4245   void getDeducedAttributes(LLVMContext &Ctx,
4246                             SmallVectorImpl<Attribute> &Attrs) const override {
4247     // TODO: Add *_globally support
4248     if (isAssumedNonNull())
4249       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4250           Ctx, getAssumedDereferenceableBytes()));
4251     else
4252       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4253           Ctx, getAssumedDereferenceableBytes()));
4254   }
4255 
4256   /// See AbstractAttribute::getAsStr().
4257   const std::string getAsStr() const override {
4258     if (!getAssumedDereferenceableBytes())
4259       return "unknown-dereferenceable";
4260     return std::string("dereferenceable") +
4261            (isAssumedNonNull() ? "" : "_or_null") +
4262            (isAssumedGlobal() ? "_globally" : "") + "<" +
4263            std::to_string(getKnownDereferenceableBytes()) + "-" +
4264            std::to_string(getAssumedDereferenceableBytes()) + ">";
4265   }
4266 };
4267 
4268 /// Dereferenceable attribute for a floating value.
4269 struct AADereferenceableFloating : AADereferenceableImpl {
4270   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4271       : AADereferenceableImpl(IRP, A) {}
4272 
4273   /// See AbstractAttribute::updateImpl(...).
4274   ChangeStatus updateImpl(Attributor &A) override {
4275     const DataLayout &DL = A.getDataLayout();
4276 
4277     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4278                             bool Stripped) -> bool {
4279       unsigned IdxWidth =
4280           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4281       APInt Offset(IdxWidth, 0);
4282       const Value *Base =
4283           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
4284 
4285       const auto &AA = A.getAAFor<AADereferenceable>(
4286           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4287       int64_t DerefBytes = 0;
4288       if (!Stripped && this == &AA) {
4289         // Use IR information if we did not strip anything.
4290         // TODO: track globally.
4291         bool CanBeNull, CanBeFreed;
4292         DerefBytes =
4293             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4294         T.GlobalState.indicatePessimisticFixpoint();
4295       } else {
4296         const DerefState &DS = AA.getState();
4297         DerefBytes = DS.DerefBytesState.getAssumed();
4298         T.GlobalState &= DS.GlobalState;
4299       }
4300 
4301       // For now we do not try to "increase" dereferenceability due to negative
4302       // indices as we first have to come up with code to deal with loops and
4303       // with overflows of the dereferenceable bytes.
4304       int64_t OffsetSExt = Offset.getSExtValue();
4305       if (OffsetSExt < 0)
4306         OffsetSExt = 0;
4307 
4308       T.takeAssumedDerefBytesMinimum(
4309           std::max(int64_t(0), DerefBytes - OffsetSExt));
4310 
4311       if (this == &AA) {
4312         if (!Stripped) {
4313           // If nothing was stripped IR information is all we got.
4314           T.takeKnownDerefBytesMaximum(
4315               std::max(int64_t(0), DerefBytes - OffsetSExt));
4316           T.indicatePessimisticFixpoint();
4317         } else if (OffsetSExt > 0) {
4318           // If something was stripped but there is circular reasoning we look
4319           // at the offset. If it is positive we would decrease the
4320           // dereferenceable bytes in a circular loop, which would slowly
4321           // drive them down to the known value; we accelerate this by
4322           // taking a pessimistic fixpoint now.
4323           T.indicatePessimisticFixpoint();
4324         }
4325       }
4326 
4327       return T.isValidState();
4328     };
4329 
4330     DerefState T;
4331     bool UsedAssumedInformation = false;
4332     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4333                                            VisitValueCB, getCtxI(),
4334                                            UsedAssumedInformation))
4335       return indicatePessimisticFixpoint();
4336 
4337     return clampStateAndIndicateChange(getState(), T);
4338   }
4339 
4340   /// See AbstractAttribute::trackStatistics()
4341   void trackStatistics() const override {
4342     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4343   }
4344 };
4345 
4346 /// Dereferenceable attribute for a return value.
4347 struct AADereferenceableReturned final
4348     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4349   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4350       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4351             IRP, A) {}
4352 
4353   /// See AbstractAttribute::trackStatistics()
4354   void trackStatistics() const override {
4355     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4356   }
4357 };
4358 
4359 /// Dereferenceable attribute for an argument
4360 struct AADereferenceableArgument final
4361     : AAArgumentFromCallSiteArguments<AADereferenceable,
4362                                       AADereferenceableImpl> {
4363   using Base =
4364       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4365   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4366       : Base(IRP, A) {}
4367 
4368   /// See AbstractAttribute::trackStatistics()
4369   void trackStatistics() const override {
4370     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4371   }
4372 };
4373 
4374 /// Dereferenceable attribute for a call site argument.
4375 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4376   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4377       : AADereferenceableFloating(IRP, A) {}
4378 
4379   /// See AbstractAttribute::trackStatistics()
4380   void trackStatistics() const override {
4381     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4382   }
4383 };
4384 
4385 /// Dereferenceable attribute deduction for a call site return value.
4386 struct AADereferenceableCallSiteReturned final
4387     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4388   using Base =
4389       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4390   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4391       : Base(IRP, A) {}
4392 
4393   /// See AbstractAttribute::trackStatistics()
4394   void trackStatistics() const override {
4395     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4396   }
4397 };
4398 
4399 // ------------------------ Align Argument Attribute ------------------------
4400 
4401 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4402                                     Value &AssociatedValue, const Use *U,
4403                                     const Instruction *I, bool &TrackUse) {
4404   // We need to follow common pointer manipulation uses to the accesses they
4405   // feed into.
4406   if (isa<CastInst>(I)) {
4407     // Follow all but ptr2int casts.
4408     TrackUse = !isa<PtrToIntInst>(I);
4409     return 0;
4410   }
4411   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4412     if (GEP->hasAllConstantIndices())
4413       TrackUse = true;
4414     return 0;
4415   }
4416 
4417   MaybeAlign MA;
4418   if (const auto *CB = dyn_cast<CallBase>(I)) {
4419     if (CB->isBundleOperand(U) || CB->isCallee(U))
4420       return 0;
4421 
4422     unsigned ArgNo = CB->getArgOperandNo(U);
4423     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4424     // As long as we only use known information there is no need to track
4425     // dependences here.
4426     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4427     MA = MaybeAlign(AlignAA.getKnownAlign());
4428   }
4429 
4430   const DataLayout &DL = A.getDataLayout();
4431   const Value *UseV = U->get();
4432   if (auto *SI = dyn_cast<StoreInst>(I)) {
4433     if (SI->getPointerOperand() == UseV)
4434       MA = SI->getAlign();
4435   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4436     if (LI->getPointerOperand() == UseV)
4437       MA = LI->getAlign();
4438   }
4439 
4440   if (!MA || *MA <= QueryingAA.getKnownAlign())
4441     return 0;
4442 
4443   unsigned Alignment = MA->value();
4444   int64_t Offset;
4445 
4446   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4447     if (Base == &AssociatedValue) {
4448       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4449       // So we can say that the maximum power of two which is a divisor of
4450       // gcd(Offset, Alignment) is an alignment.
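      // Illustrative example (assumed values): for Offset = 12 and an
      // alignment of 8, gcd(12, 8) = 4, so this use only proves a 4-byte
      // alignment for the base pointer.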
4451 
4452       uint32_t gcd =
4453           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4454       Alignment = llvm::PowerOf2Floor(gcd);
4455     }
4456   }
4457 
4458   return Alignment;
4459 }
4460 
4461 struct AAAlignImpl : AAAlign {
4462   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4463 
4464   /// See AbstractAttribute::initialize(...).
4465   void initialize(Attributor &A) override {
4466     SmallVector<Attribute, 4> Attrs;
4467     getAttrs({Attribute::Alignment}, Attrs);
4468     for (const Attribute &Attr : Attrs)
4469       takeKnownMaximum(Attr.getValueAsInt());
4470 
4471     Value &V = getAssociatedValue();
4472     // TODO: This is a HACK to keep getPointerAlignment from introducing a
4473     //       ptr2int use of the function pointer. This was caused by D73131. We
4474     //       want to avoid this for function pointers especially because we
4475     //       iterate their uses and int2ptr is not handled. It is not a
4476     //       correctness problem though!
4477     if (!V.getType()->getPointerElementType()->isFunctionTy())
4478       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4479 
4480     if (getIRPosition().isFnInterfaceKind() &&
4481         (!getAnchorScope() ||
4482          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4483       indicatePessimisticFixpoint();
4484       return;
4485     }
4486 
4487     if (Instruction *CtxI = getCtxI())
4488       followUsesInMBEC(*this, A, getState(), *CtxI);
4489   }
4490 
4491   /// See AbstractAttribute::manifest(...).
4492   ChangeStatus manifest(Attributor &A) override {
4493     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4494 
4495     // Check for users that allow alignment annotations.
4496     Value &AssociatedValue = getAssociatedValue();
4497     for (const Use &U : AssociatedValue.uses()) {
4498       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4499         if (SI->getPointerOperand() == &AssociatedValue)
4500           if (SI->getAlignment() < getAssumedAlign()) {
4501             STATS_DECLTRACK(AAAlign, Store,
4502                             "Number of times alignment added to a store");
4503             SI->setAlignment(Align(getAssumedAlign()));
4504             LoadStoreChanged = ChangeStatus::CHANGED;
4505           }
4506       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4507         if (LI->getPointerOperand() == &AssociatedValue)
4508           if (LI->getAlignment() < getAssumedAlign()) {
4509             LI->setAlignment(Align(getAssumedAlign()));
4510             STATS_DECLTRACK(AAAlign, Load,
4511                             "Number of times alignment added to a load");
4512             LoadStoreChanged = ChangeStatus::CHANGED;
4513           }
4514       }
4515     }
4516 
4517     ChangeStatus Changed = AAAlign::manifest(A);
4518 
4519     Align InheritAlign =
4520         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4521     if (InheritAlign >= getAssumedAlign())
4522       return LoadStoreChanged;
4523     return Changed | LoadStoreChanged;
4524   }
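
  // Illustrative example (not from this file): with an assumed alignment
  // of 16 for %p, the loop above rewrites
  //   store i32 0, i32* %p, align 4
  // into
  //   store i32 0, i32* %p, align 16
  // and records the change in LoadStoreChanged.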
4525 
4526   // TODO: Provide a helper to determine the implied ABI alignment and check in
4527   //       the existing manifest method and a new one for AAAlignImpl that value
4528   //       to avoid making the alignment explicit if it did not improve.
4529 
4530   /// See AbstractAttribute::getDeducedAttributes
4531   virtual void
4532   getDeducedAttributes(LLVMContext &Ctx,
4533                        SmallVectorImpl<Attribute> &Attrs) const override {
4534     if (getAssumedAlign() > 1)
4535       Attrs.emplace_back(
4536           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4537   }
4538 
4539   /// See followUsesInMBEC
4540   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4541                        AAAlign::StateType &State) {
4542     bool TrackUse = false;
4543 
4544     unsigned int KnownAlign =
4545         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4546     State.takeKnownMaximum(KnownAlign);
4547 
4548     return TrackUse;
4549   }
4550 
4551   /// See AbstractAttribute::getAsStr().
4552   const std::string getAsStr() const override {
4553     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4554                                 "-" + std::to_string(getAssumedAlign()) + ">")
4555                              : "unknown-align";
4556   }
4557 };
4558 
4559 /// Align attribute for a floating value.
4560 struct AAAlignFloating : AAAlignImpl {
4561   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4562 
4563   /// See AbstractAttribute::updateImpl(...).
4564   ChangeStatus updateImpl(Attributor &A) override {
4565     const DataLayout &DL = A.getDataLayout();
4566 
4567     auto VisitValueCB = [&](Value &V, const Instruction *,
4568                             AAAlign::StateType &T, bool Stripped) -> bool {
4569       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4570                                            DepClassTy::REQUIRED);
4571       if (!Stripped && this == &AA) {
4572         int64_t Offset;
4573         unsigned Alignment = 1;
4574         if (const Value *Base =
4575                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4576           Align PA = Base->getPointerAlignment(DL);
4577           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4578           // So we can say that the maximum power of two which is a divisor of
4579           // gcd(Offset, Alignment) is an alignment.
4580 
4581           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4582                                                uint32_t(PA.value()));
4583           Alignment = llvm::PowerOf2Floor(gcd);
4584         } else {
4585           Alignment = V.getPointerAlignment(DL).value();
4586         }
4587         // Use only IR information if we did not strip anything.
4588         T.takeKnownMaximum(Alignment);
4589         T.indicatePessimisticFixpoint();
4590       } else {
4591         // Use abstract attribute information.
4592         const AAAlign::StateType &DS = AA.getState();
4593         T ^= DS;
4594       }
4595       return T.isValidState();
4596     };
4597 
4598     StateType T;
4599     bool UsedAssumedInformation = false;
4600     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4601                                           VisitValueCB, getCtxI(),
4602                                           UsedAssumedInformation))
4603       return indicatePessimisticFixpoint();
4604 
4605     // TODO: If we know we visited all incoming values, thus none are assumed
4606     // dead, we can take the known information from the state T.
4607     return clampStateAndIndicateChange(getState(), T);
4608   }
4609 
4610   /// See AbstractAttribute::trackStatistics()
4611   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4612 };
4613 
4614 /// Align attribute for function return value.
4615 struct AAAlignReturned final
4616     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4617   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4618   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4619 
4620   /// See AbstractAttribute::initialize(...).
4621   void initialize(Attributor &A) override {
4622     Base::initialize(A);
4623     Function *F = getAssociatedFunction();
4624     if (!F || F->isDeclaration())
4625       indicatePessimisticFixpoint();
4626   }
4627 
4628   /// See AbstractAttribute::trackStatistics()
4629   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4630 };
4631 
4632 /// Align attribute for function argument.
4633 struct AAAlignArgument final
4634     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4635   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4636   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4637 
4638   /// See AbstractAttribute::manifest(...).
4639   ChangeStatus manifest(Attributor &A) override {
4640     // If the associated argument is involved in a must-tail call we give up
4641     // because we would need to keep the argument alignments of caller and
4642     // callee in sync. It just does not seem worth the trouble right now.
4643     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4644       return ChangeStatus::UNCHANGED;
4645     return Base::manifest(A);
4646   }
4647 
4648   /// See AbstractAttribute::trackStatistics()
4649   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4650 };
4651 
4652 struct AAAlignCallSiteArgument final : AAAlignFloating {
4653   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4654       : AAAlignFloating(IRP, A) {}
4655 
4656   /// See AbstractAttribute::manifest(...).
4657   ChangeStatus manifest(Attributor &A) override {
4658     // If the associated argument is involved in a must-tail call we give up
4659     // because we would need to keep the argument alignments of caller and
4660     // callee in sync. It just does not seem worth the trouble right now.
4661     if (Argument *Arg = getAssociatedArgument())
4662       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4663         return ChangeStatus::UNCHANGED;
4664     ChangeStatus Changed = AAAlignImpl::manifest(A);
4665     Align InheritAlign =
4666         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4667     if (InheritAlign >= getAssumedAlign())
4668       Changed = ChangeStatus::UNCHANGED;
4669     return Changed;
4670   }
4671 
4672   /// See AbstractAttribute::updateImpl(Attributor &A).
4673   ChangeStatus updateImpl(Attributor &A) override {
4674     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4675     if (Argument *Arg = getAssociatedArgument()) {
4676       // We only take known information from the argument
4677       // so we do not need to track a dependence.
4678       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4679           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4680       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4681     }
4682     return Changed;
4683   }
4684 
4685   /// See AbstractAttribute::trackStatistics()
4686   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4687 };
4688 
4689 /// Align attribute deduction for a call site return value.
4690 struct AAAlignCallSiteReturned final
4691     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4692   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4693   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4694       : Base(IRP, A) {}
4695 
4696   /// See AbstractAttribute::initialize(...).
4697   void initialize(Attributor &A) override {
4698     Base::initialize(A);
4699     Function *F = getAssociatedFunction();
4700     if (!F || F->isDeclaration())
4701       indicatePessimisticFixpoint();
4702   }
4703 
4704   /// See AbstractAttribute::trackStatistics()
4705   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4706 };
4707 
4708 /// ------------------ Function No-Return Attribute ----------------------------
4709 struct AANoReturnImpl : public AANoReturn {
4710   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4711 
4712   /// See AbstractAttribute::initialize(...).
4713   void initialize(Attributor &A) override {
4714     AANoReturn::initialize(A);
4715     Function *F = getAssociatedFunction();
4716     if (!F || F->isDeclaration())
4717       indicatePessimisticFixpoint();
4718   }
4719 
4720   /// See AbstractAttribute::getAsStr().
4721   const std::string getAsStr() const override {
4722     return getAssumed() ? "noreturn" : "may-return";
4723   }
4724 
4725   /// See AbstractAttribute::updateImpl(Attributor &A).
4726   virtual ChangeStatus updateImpl(Attributor &A) override {
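    // A function is `noreturn` exactly when it contains no live return
    // instruction: the callback below rejects every `ret`, so the check
    // succeeds only if all returns are assumed dead.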
4727     auto CheckForNoReturn = [](Instruction &) { return false; };
4728     bool UsedAssumedInformation = false;
4729     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4730                                    {(unsigned)Instruction::Ret},
4731                                    UsedAssumedInformation))
4732       return indicatePessimisticFixpoint();
4733     return ChangeStatus::UNCHANGED;
4734   }
4735 };
4736 
4737 struct AANoReturnFunction final : AANoReturnImpl {
4738   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4739       : AANoReturnImpl(IRP, A) {}
4740 
4741   /// See AbstractAttribute::trackStatistics()
4742   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4743 };
4744 
4745 /// NoReturn attribute deduction for a call site.
4746 struct AANoReturnCallSite final : AANoReturnImpl {
4747   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4748       : AANoReturnImpl(IRP, A) {}
4749 
4750   /// See AbstractAttribute::initialize(...).
4751   void initialize(Attributor &A) override {
4752     AANoReturnImpl::initialize(A);
4753     if (Function *F = getAssociatedFunction()) {
4754       const IRPosition &FnPos = IRPosition::function(*F);
4755       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4756       if (!FnAA.isAssumedNoReturn())
4757         indicatePessimisticFixpoint();
4758     }
4759   }
4760 
4761   /// See AbstractAttribute::updateImpl(...).
4762   ChangeStatus updateImpl(Attributor &A) override {
4763     // TODO: Once we have call site specific value information we can provide
4764     //       call site specific liveness information and then it makes
4765     //       sense to specialize attributes for call sites instead of
4766     //       redirecting requests to the callee.
4767     Function *F = getAssociatedFunction();
4768     const IRPosition &FnPos = IRPosition::function(*F);
4769     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4770     return clampStateAndIndicateChange(getState(), FnAA.getState());
4771   }
4772 
4773   /// See AbstractAttribute::trackStatistics()
4774   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4775 };
4776 
4777 /// ----------------------- Variable Capturing ---------------------------------
4778 
4779 /// A class to hold the state for no-capture attributes.
4780 struct AANoCaptureImpl : public AANoCapture {
4781   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4782 
4783   /// See AbstractAttribute::initialize(...).
4784   void initialize(Attributor &A) override {
4785     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4786       indicateOptimisticFixpoint();
4787       return;
4788     }
4789     Function *AnchorScope = getAnchorScope();
4790     if (isFnInterfaceKind() &&
4791         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4792       indicatePessimisticFixpoint();
4793       return;
4794     }
4795 
4796     // You cannot "capture" null in the default address space.
4797     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4798         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4799       indicateOptimisticFixpoint();
4800       return;
4801     }
4802 
4803     const Function *F =
4804         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4805 
4806     // Check what state the associated function can actually capture.
4807     if (F)
4808       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4809     else
4810       indicatePessimisticFixpoint();
4811   }
4812 
4813   /// See AbstractAttribute::updateImpl(...).
4814   ChangeStatus updateImpl(Attributor &A) override;
4815 
4816   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4817   virtual void
4818   getDeducedAttributes(LLVMContext &Ctx,
4819                        SmallVectorImpl<Attribute> &Attrs) const override {
4820     if (!isAssumedNoCaptureMaybeReturned())
4821       return;
4822 
4823     if (isArgumentPosition()) {
4824       if (isAssumedNoCapture())
4825         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4826       else if (ManifestInternal)
4827         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4828     }
4829   }
4830 
4831   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
4832   /// depending on the ability of the function associated with \p IRP to capture
4833   /// state in memory and through "returning/throwing", respectively.
4834   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4835                                                    const Function &F,
4836                                                    BitIntegerState &State) {
4837     // TODO: Once we have memory behavior attributes we should use them here.
4838 
4839     // If we know we cannot communicate or write to memory, we do not care about
4840     // ptr2int anymore.
4841     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4842         F.getReturnType()->isVoidTy()) {
4843       State.addKnownBits(NO_CAPTURE);
4844       return;
4845     }
4846 
4847     // A function cannot capture state in memory if it only reads memory; it
4848     // can, however, return/throw state and the state might be influenced by the
4849     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4850     if (F.onlyReadsMemory())
4851       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4852 
4853     // A function cannot communicate state back if it does not throw
4854     // exceptions and does not return values.
4855     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4856       State.addKnownBits(NOT_CAPTURED_IN_RET);
4857 
4858     // Check existing "returned" attributes.
4859     int ArgNo = IRP.getCalleeArgNo();
4860     if (F.doesNotThrow() && ArgNo >= 0) {
4861       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4862         if (F.hasParamAttribute(u, Attribute::Returned)) {
4863           if (u == unsigned(ArgNo))
4864             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4865           else if (F.onlyReadsMemory())
4866             State.addKnownBits(NO_CAPTURE);
4867           else
4868             State.addKnownBits(NOT_CAPTURED_IN_RET);
4869           break;
4870         }
4871     }
4872   }
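
  // Illustrative example (not from this file): for
  //   define void @f(i8* %p) readonly nounwind { ... }
  // the function can neither write %p to memory nor communicate it back
  // through return values or exceptions, so NO_CAPTURE is known upfront.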
4873 
4874   /// See AbstractState::getAsStr().
4875   const std::string getAsStr() const override {
4876     if (isKnownNoCapture())
4877       return "known not-captured";
4878     if (isAssumedNoCapture())
4879       return "assumed not-captured";
4880     if (isKnownNoCaptureMaybeReturned())
4881       return "known not-captured-maybe-returned";
4882     if (isAssumedNoCaptureMaybeReturned())
4883       return "assumed not-captured-maybe-returned";
4884     return "assumed-captured";
4885   }
4886 };
4887 
4888 /// Attributor-aware capture tracker.
4889 struct AACaptureUseTracker final : public CaptureTracker {
4890 
4891   /// Create a capture tracker that can lookup in-flight abstract attributes
4892   /// through the Attributor \p A.
4893   ///
4894   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4895   /// search is stopped. If a use leads to a return instruction,
4896   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4897   /// If a use leads to a ptr2int which may capture the value,
4898   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4899   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4900   /// set. All values in \p PotentialCopies are later tracked as well. For every
4901   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4902   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4903   /// conservatively set to true.
4904   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4905                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4906                       SmallSetVector<Value *, 4> &PotentialCopies,
4907                       unsigned &RemainingUsesToExplore)
4908       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4909         PotentialCopies(PotentialCopies),
4910         RemainingUsesToExplore(RemainingUsesToExplore) {}
4911 
4912   /// Determine if \p V may be captured. *Also updates the state!*
4913   bool valueMayBeCaptured(const Value *V) {
4914     if (V->getType()->isPointerTy()) {
4915       PointerMayBeCaptured(V, this);
4916     } else {
4917       State.indicatePessimisticFixpoint();
4918     }
4919     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4920   }
4921 
4922   /// See CaptureTracker::tooManyUses().
4923   void tooManyUses() override {
4924     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4925   }
4926 
4927   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4928     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4929       return true;
4930     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4931         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4932     return DerefAA.getAssumedDereferenceableBytes();
4933   }
4934 
4935   /// See CaptureTracker::captured(...).
4936   bool captured(const Use *U) override {
4937     Instruction *UInst = cast<Instruction>(U->getUser());
4938     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4939                       << "\n");
4940 
4941     // Because we may reuse the tracker multiple times we keep track of the
4942     // number of explored uses ourselves as well.
4943     if (RemainingUsesToExplore-- == 0) {
4944       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4945       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4946                           /* Return */ true);
4947     }
4948 
4949     // Deal with ptr2int by following uses.
4950     if (isa<PtrToIntInst>(UInst)) {
4951       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4952       return valueMayBeCaptured(UInst);
4953     }
4954 
4955     // For stores we check if we can follow the value through memory or not.
4956     if (auto *SI = dyn_cast<StoreInst>(UInst)) {
4957       if (SI->isVolatile())
4958         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4959                             /* Return */ false);
4960       bool UsedAssumedInformation = false;
4961       if (!AA::getPotentialCopiesOfStoredValue(
4962               A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
4963         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4964                             /* Return */ false);
4965       // Not captured directly, potential copies will be checked.
4966       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4967                           /* Return */ false);
4968     }
4969 
4970     // Explicitly catch return instructions.
4971     if (isa<ReturnInst>(UInst)) {
4972       if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
4973         return isCapturedIn(/* Memory */ false, /* Integer */ false,
4974                             /* Return */ true);
4975       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4976                           /* Return */ true);
4977     }
4978 
4979     // For now we only use special logic for call sites. However, the tracker
4980     // itself knows about a lot of other non-capturing cases already.
4981     auto *CB = dyn_cast<CallBase>(UInst);
4982     if (!CB || !CB->isArgOperand(U))
4983       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4984                           /* Return */ true);
4985 
4986     unsigned ArgNo = CB->getArgOperandNo(U);
4987     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4988     // If we have an abstract no-capture attribute for the argument we can use
4989     // it to justify a non-capture attribute here. This allows recursion!
4990     auto &ArgNoCaptureAA =
4991         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4992     if (ArgNoCaptureAA.isAssumedNoCapture())
4993       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4994                           /* Return */ false);
4995     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4996       addPotentialCopy(*CB);
4997       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4998                           /* Return */ false);
4999     }
5000 
5001     // Lastly, we could not find a reason to assume no-capture, so we do not.
5002     return isCapturedIn(/* Memory */ true, /* Integer */ true,
5003                         /* Return */ true);
5004   }
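
  // Illustrative example (not from this file): for a non-volatile
  //   store i8* %p, i8** %slot
  // %p is not immediately treated as captured; instead, the values that
  // may be read back from %slot are added to PotentialCopies and tracked
  // as well.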
5005 
5006   /// Register \p CS as potential copy of the value we are checking.
5007   void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); }
5008 
5009   /// See CaptureTracker::shouldExplore(...).
5010   bool shouldExplore(const Use *U) override {
5011     // Check liveness and ignore droppable users.
5012     bool UsedAssumedInformation = false;
5013     return !U->getUser()->isDroppable() &&
5014            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA,
5015                             UsedAssumedInformation);
5016   }
5017 
5018   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
5019   /// \p CapturedInRet, then return the appropriate value for use in the
5020   /// CaptureTracker::captured() interface.
5021   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
5022                     bool CapturedInRet) {
5023     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5024                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5025     if (CapturedInMem)
5026       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5027     if (CapturedInInt)
5028       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5029     if (CapturedInRet)
5030       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5031     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5032   }
5033 
5034 private:
5035   /// The attributor providing in-flight abstract attributes.
5036   Attributor &A;
5037 
5038   /// The abstract attribute currently updated.
5039   AANoCapture &NoCaptureAA;
5040 
5041   /// The abstract liveness state.
5042   const AAIsDead &IsDeadAA;
5043 
5044   /// The state currently updated.
5045   AANoCapture::StateType &State;
5046 
5047   /// Set of potential copies of the tracked value.
5048   SmallSetVector<Value *, 4> &PotentialCopies;
5049 
5050   /// Global counter to limit the number of explored uses.
5051   unsigned &RemainingUsesToExplore;
5052 };
5053 
5054 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5055   const IRPosition &IRP = getIRPosition();
5056   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5057                                   : &IRP.getAssociatedValue();
5058   if (!V)
5059     return indicatePessimisticFixpoint();
5060 
5061   const Function *F =
5062       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5063   assert(F && "Expected a function!");
5064   const IRPosition &FnPos = IRPosition::function(*F);
5065   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
5066 
5067   AANoCapture::StateType T;
5068 
5069   // Readonly means we cannot capture through memory.
5070   bool IsKnown;
5071   if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5072     T.addKnownBits(NOT_CAPTURED_IN_MEM);
5073     if (IsKnown)
5074       addKnownBits(NOT_CAPTURED_IN_MEM);
5075   }
5076 
5077   // Make sure all returned values are different from the underlying value.
5078   // TODO: we could do this in a more sophisticated way inside
5079   //       AAReturnedValues, e.g., track all values that escape through returns
5080   //       directly somehow.
5081   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5082     bool SeenConstant = false;
5083     for (auto &It : RVAA.returned_values()) {
5084       if (isa<Constant>(It.first)) {
5085         if (SeenConstant)
5086           return false;
5087         SeenConstant = true;
5088       } else if (!isa<Argument>(It.first) ||
5089                  It.first == getAssociatedArgument())
5090         return false;
5091     }
5092     return true;
5093   };
5094 
5095   const auto &NoUnwindAA =
5096       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5097   if (NoUnwindAA.isAssumedNoUnwind()) {
5098     bool IsVoidTy = F->getReturnType()->isVoidTy();
5099     const AAReturnedValues *RVAA =
5100         IsVoidTy ? nullptr
5101                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5102 
5104                                                  DepClassTy::OPTIONAL);
5105       T.addKnownBits(NOT_CAPTURED_IN_RET);
5106       if (T.isKnown(NOT_CAPTURED_IN_MEM))
5107         return ChangeStatus::UNCHANGED;
5108       if (NoUnwindAA.isKnownNoUnwind() &&
5109           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5110         addKnownBits(NOT_CAPTURED_IN_RET);
5111         if (isKnown(NOT_CAPTURED_IN_MEM))
5112           return indicateOptimisticFixpoint();
5113       }
5114     }
5115   }
5116 
5117   // Use the CaptureTracker interface and logic with the specialized tracker,
5118   // defined in AACaptureUseTracker, that can look at in-flight abstract
5119   // attributes and directly update the assumed state.
5120   SmallSetVector<Value *, 4> PotentialCopies;
5121   unsigned RemainingUsesToExplore =
5122       getDefaultMaxUsesToExploreForCaptureTracking();
5123   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
5124                               RemainingUsesToExplore);
5125 
5126   // Check all potential copies of the associated value until we can assume
5127   // none will be captured or we have to assume at least one might be.
5128   unsigned Idx = 0;
5129   PotentialCopies.insert(V);
5130   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
5131     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
5132 
5133   AANoCapture::StateType &S = getState();
5134   auto Assumed = S.getAssumed();
5135   S.intersectAssumedBits(T.getAssumed());
5136   if (!isAssumedNoCaptureMaybeReturned())
5137     return indicatePessimisticFixpoint();
5138   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5139                                    : ChangeStatus::CHANGED;
5140 }
5141 
5142 /// NoCapture attribute for function arguments.
5143 struct AANoCaptureArgument final : AANoCaptureImpl {
5144   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5145       : AANoCaptureImpl(IRP, A) {}
5146 
5147   /// See AbstractAttribute::trackStatistics()
5148   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5149 };
5150 
5151 /// NoCapture attribute for call site arguments.
5152 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5153   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5154       : AANoCaptureImpl(IRP, A) {}
5155 
5156   /// See AbstractAttribute::initialize(...).
5157   void initialize(Attributor &A) override {
5158     if (Argument *Arg = getAssociatedArgument())
5159       if (Arg->hasByValAttr())
5160         indicateOptimisticFixpoint();
5161     AANoCaptureImpl::initialize(A);
5162   }
5163 
5164   /// See AbstractAttribute::updateImpl(...).
5165   ChangeStatus updateImpl(Attributor &A) override {
5166     // TODO: Once we have call site specific value information we can provide
5167     //       call site specific liveness information and then it makes
5168     //       sense to specialize attributes for call site arguments instead of
5169     //       redirecting requests to the callee argument.
5170     Argument *Arg = getAssociatedArgument();
5171     if (!Arg)
5172       return indicatePessimisticFixpoint();
5173     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5174     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5175     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5176   }
5177 
5178   /// See AbstractAttribute::trackStatistics()
5179   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5180 };
5181 
5182 /// NoCapture attribute for floating values.
5183 struct AANoCaptureFloating final : AANoCaptureImpl {
5184   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5185       : AANoCaptureImpl(IRP, A) {}
5186 
5187   /// See AbstractAttribute::trackStatistics()
5188   void trackStatistics() const override {
5189     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5190   }
5191 };
5192 
5193 /// NoCapture attribute for function return value.
5194 struct AANoCaptureReturned final : AANoCaptureImpl {
5195   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5196       : AANoCaptureImpl(IRP, A) {
5197     llvm_unreachable("NoCapture is not applicable to function returns!");
5198   }
5199 
5200   /// See AbstractAttribute::initialize(...).
5201   void initialize(Attributor &A) override {
5202     llvm_unreachable("NoCapture is not applicable to function returns!");
5203   }
5204 
5205   /// See AbstractAttribute::updateImpl(...).
5206   ChangeStatus updateImpl(Attributor &A) override {
5207     llvm_unreachable("NoCapture is not applicable to function returns!");
5208   }
5209 
5210   /// See AbstractAttribute::trackStatistics()
5211   void trackStatistics() const override {}
5212 };
5213 
5214 /// NoCapture attribute deduction for a call site return value.
5215 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5216   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5217       : AANoCaptureImpl(IRP, A) {}
5218 
5219   /// See AbstractAttribute::initialize(...).
5220   void initialize(Attributor &A) override {
5221     const Function *F = getAnchorScope();
5222     // Check what state the associated function can actually capture.
5223     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5224   }
5225 
5226   /// See AbstractAttribute::trackStatistics()
5227   void trackStatistics() const override {
5228     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5229   }
5230 };
5231 
5232 /// ------------------ Value Simplify Attribute ----------------------------
5233 
5234 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5235   // FIXME: Add typecast support.
5236   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5237       SimplifiedAssociatedValue, Other, Ty);
5238   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5239     return false;
5240 
5241   LLVM_DEBUG({
5242     if (SimplifiedAssociatedValue.hasValue())
5243       dbgs() << "[ValueSimplify] is assumed to be "
5244              << **SimplifiedAssociatedValue << "\n";
5245     else
5246       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5247   });
5248   return true;
5249 }
5250 
5251 struct AAValueSimplifyImpl : AAValueSimplify {
5252   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5253       : AAValueSimplify(IRP, A) {}
5254 
5255   /// See AbstractAttribute::initialize(...).
5256   void initialize(Attributor &A) override {
5257     if (getAssociatedValue().getType()->isVoidTy())
5258       indicatePessimisticFixpoint();
5259     if (A.hasSimplificationCallback(getIRPosition()))
5260       indicatePessimisticFixpoint();
5261   }
5262 
5263   /// See AbstractAttribute::getAsStr().
5264   const std::string getAsStr() const override {
5265     LLVM_DEBUG({
5266       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
5267       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5268         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5269     });
5270     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5271                           : "not-simple";
5272   }
5273 
5274   /// See AbstractAttribute::trackStatistics()
5275   void trackStatistics() const override {}
5276 
5277   /// See AAValueSimplify::getAssumedSimplifiedValue()
5278   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5279     return SimplifiedAssociatedValue;
5280   }
5281 
5282   /// Return a value we can use as replacement for the associated one, or
5283   /// nullptr if we don't have one that makes sense.
5284   Value *getReplacementValue(Attributor &A) const {
5285     Value *NewV;
5286     NewV = SimplifiedAssociatedValue.hasValue()
5287                ? SimplifiedAssociatedValue.getValue()
5288                : UndefValue::get(getAssociatedType());
5289     if (!NewV)
5290       return nullptr;
5291     NewV = AA::getWithType(*NewV, *getAssociatedType());
5292     if (!NewV || NewV == &getAssociatedValue())
5293       return nullptr;
5294     const Instruction *CtxI = getCtxI();
5295     if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
5296       return nullptr;
5297     if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
5298       return nullptr;
5299     return NewV;
5300   }
5301 
5302   /// Helper function for querying AAValueSimplify and updating the candidate.
5303   /// \param IRP The value position we are trying to unify with SimplifiedValue
5304   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5305                       const IRPosition &IRP, bool Simplify = true) {
5306     bool UsedAssumedInformation = false;
5307     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5308     if (Simplify)
5309       QueryingValueSimplified =
5310           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5311     return unionAssumed(QueryingValueSimplified);
5312   }
5313 
5314   /// Return true if a simplification candidate was found.
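       /// For illustration, if \p AAType is AAValueConstantRange and the range
       /// of the associated integer collapses to the single value 7, the
       /// assumed simplified value becomes the constant 7.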
5315   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5316     if (!getAssociatedValue().getType()->isIntegerTy())
5317       return false;
5318 
5319     // This will also pass the call base context.
5320     const auto &AA =
5321         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5322 
5323     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
5324 
5325     if (!COpt.hasValue()) {
5326       SimplifiedAssociatedValue = llvm::None;
5327       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5328       return true;
5329     }
5330     if (auto *C = COpt.getValue()) {
5331       SimplifiedAssociatedValue = C;
5332       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5333       return true;
5334     }
5335     return false;
5336   }
5337 
5338   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5339     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5340       return true;
5341     if (askSimplifiedValueFor<AAPotentialValues>(A))
5342       return true;
5343     return false;
5344   }
5345 
5346   /// See AbstractAttribute::manifest(...).
5347   ChangeStatus manifest(Attributor &A) override {
5348     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5349     if (getAssociatedValue().user_empty())
5350       return Changed;
5351 
5352     if (auto *NewV = getReplacementValue(A)) {
5353       LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> "
5354                         << *NewV << " :: " << *this << "\n");
5355       if (A.changeValueAfterManifest(getAssociatedValue(), *NewV))
5356         Changed = ChangeStatus::CHANGED;
5357     }
5358 
5359     return Changed | AAValueSimplify::manifest(A);
5360   }
5361 
5362   /// See AbstractState::indicatePessimisticFixpoint(...).
5363   ChangeStatus indicatePessimisticFixpoint() override {
5364     SimplifiedAssociatedValue = &getAssociatedValue();
5365     return AAValueSimplify::indicatePessimisticFixpoint();
5366   }
5367 
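       /// Try to simplify the value loaded by \p L by looking at the assumed
       /// underlying objects of its pointer operand and all writes that may
       /// interfere with the load. A minimal sketch of the pattern this
       /// handles (hypothetical IR):
       ///   %a = alloca i32
       ///   store i32 7, i32* %a
       ///   %v = load i32, i32* %a
       /// If the store is the only interfering write, %v is assumed to be 7.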
5368   static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
5369                          LoadInst &L, function_ref<bool(Value &)> Union) {
5370     auto UnionWrapper = [&](Value &V, Value &Obj) {
5371       if (isa<AllocaInst>(Obj))
5372         return Union(V);
5373       if (!AA::isDynamicallyUnique(A, AA, V))
5374         return false;
5375       if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
5376         return false;
5377       return Union(V);
5378     };
5379 
5380     Value &Ptr = *L.getPointerOperand();
5381     SmallVector<Value *, 8> Objects;
5382     bool UsedAssumedInformation = false;
5383     if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L,
5384                                          UsedAssumedInformation))
5385       return false;
5386 
5387     const auto *TLI =
5388         A.getInfoCache().getTargetLibraryInfoForFunction(*L.getFunction());
5389     for (Value *Obj : Objects) {
5390       LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
5391       if (isa<UndefValue>(Obj))
5392         continue;
5393       if (isa<ConstantPointerNull>(Obj)) {
5394         // A null pointer access can be undefined but any offset from null may
5395         // be OK. We do not try to optimize the latter.
5396         if (!NullPointerIsDefined(L.getFunction(),
5397                                   Ptr.getType()->getPointerAddressSpace()) &&
5398             A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
5399           continue;
5400         return false;
5401       }
5402       Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType(), TLI);
5403       if (!InitialVal || !Union(*InitialVal))
5404         return false;
5405 
5406       LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
5407                            "propagation, checking accesses next.\n");
5408 
5409       auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
5410         LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
5411         if (Acc.isWrittenValueYetUndetermined())
5412           return true;
5413         Value *Content = Acc.getWrittenValue();
5414         if (!Content)
5415           return false;
5416         Value *CastedContent =
5417             AA::getWithType(*Content, *AA.getAssociatedType());
5418         if (!CastedContent)
5419           return false;
5420         if (IsExact)
5421           return UnionWrapper(*CastedContent, *Obj);
5422         if (auto *C = dyn_cast<Constant>(CastedContent))
5423           if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
5424             return UnionWrapper(*CastedContent, *Obj);
5425         return false;
5426       };
5427 
5428       auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
5429                                            DepClassTy::REQUIRED);
5430       if (!PI.forallInterferingWrites(A, AA, L, CheckAccess))
5431         return false;
5432     }
5433     return true;
5434   }
5435 };
5436 
5437 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5438   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5439       : AAValueSimplifyImpl(IRP, A) {}
5440 
5441   void initialize(Attributor &A) override {
5442     AAValueSimplifyImpl::initialize(A);
5443     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5444       indicatePessimisticFixpoint();
5445     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5446                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5447                 /* IgnoreSubsumingPositions */ true))
5448       indicatePessimisticFixpoint();
5449 
5450     // FIXME: This is a hack to prevent us from propagating function pointers
5451     // in the new pass manager CGSCC pass as it creates call edges the
5452     // CallGraphUpdater cannot handle yet.
5453     Value &V = getAssociatedValue();
5454     if (V.getType()->isPointerTy() &&
5455         V.getType()->getPointerElementType()->isFunctionTy() &&
5456         !A.isModulePass())
5457       indicatePessimisticFixpoint();
5458   }
5459 
5460   /// See AbstractAttribute::updateImpl(...).
5461   ChangeStatus updateImpl(Attributor &A) override {
5462     // Byval is only replaceable if it is readonly; otherwise we would write into
5463     // the replaced value and not the copy that byval creates implicitly.
5464     Argument *Arg = getAssociatedArgument();
5465     if (Arg->hasByValAttr()) {
5466       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5467       //       there is no race by not copying a constant byval.
5468       bool IsKnown;
5469       if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5470         return indicatePessimisticFixpoint();
5471     }
5472 
5473     auto Before = SimplifiedAssociatedValue;
5474 
5475     auto PredForCallSite = [&](AbstractCallSite ACS) {
5476       const IRPosition &ACSArgPos =
5477           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5478       // Check if a corresponding argument was found or if it is not
5479       // associated (which can happen for callback calls).
5480       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5481         return false;
5482 
5483       // Simplify the argument operand explicitly and check if the result is
5484       // valid in the current scope. This avoids referring to simplified values
5485       // in other functions, e.g., we don't want to say an argument in a
5486       // static function is actually an argument in a different function.
5487       bool UsedAssumedInformation = false;
5488       Optional<Constant *> SimpleArgOp =
5489           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5490       if (!SimpleArgOp.hasValue())
5491         return true;
5492       if (!SimpleArgOp.getValue())
5493         return false;
5494       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5495         return false;
5496       return unionAssumed(*SimpleArgOp);
5497     };
5498 
5499     // Generate an answer specific to the call site context.
5500     bool Success;
5501     bool UsedAssumedInformation = false;
5502     if (hasCallBaseContext() &&
5503         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5504       Success = PredForCallSite(
5505           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5506     else
5507       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5508                                        UsedAssumedInformation);
5509 
5510     if (!Success)
5511       if (!askSimplifiedValueForOtherAAs(A))
5512         return indicatePessimisticFixpoint();
5513 
5514     // If a candidate was found in this update, return CHANGED.
5515     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5516                                                : ChangeStatus::CHANGED;
5517   }
5518 
5519   /// See AbstractAttribute::trackStatistics()
5520   void trackStatistics() const override {
5521     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5522   }
5523 };
5524 
5525 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5526   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5527       : AAValueSimplifyImpl(IRP, A) {}
5528 
5529   /// See AAValueSimplify::getAssumedSimplifiedValue()
5530   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5531     if (!isValidState())
5532       return nullptr;
5533     return SimplifiedAssociatedValue;
5534   }
5535 
5536   /// See AbstractAttribute::updateImpl(...).
5537   ChangeStatus updateImpl(Attributor &A) override {
5538     auto Before = SimplifiedAssociatedValue;
5539 
5540     auto PredForReturned = [&](Value &V) {
5541       return checkAndUpdate(A, *this,
5542                             IRPosition::value(V, getCallBaseContext()));
5543     };
5544 
5545     if (!A.checkForAllReturnedValues(PredForReturned, *this))
5546       if (!askSimplifiedValueForOtherAAs(A))
5547         return indicatePessimisticFixpoint();
5548 
5549     // If a candidate was found in this update, return CHANGED.
5550     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5551                                                : ChangeStatus::CHANGED;
5552   }
5553 
5554   ChangeStatus manifest(Attributor &A) override {
5555     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5556 
5557     if (auto *NewV = getReplacementValue(A)) {
5558       auto PredForReturned =
5559           [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5560             for (ReturnInst *RI : RetInsts) {
5561               Value *ReturnedVal = RI->getReturnValue();
5562               if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal))
5563                 return true;
5564               assert(RI->getFunction() == getAnchorScope() &&
5565                      "ReturnInst in wrong function!");
5566               LLVM_DEBUG(dbgs()
5567                          << "[ValueSimplify] " << *ReturnedVal << " -> "
5568                          << *NewV << " in " << *RI << " :: " << *this << "\n");
5569               if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
5570                 Changed = ChangeStatus::CHANGED;
5571             }
5572             return true;
5573           };
5574       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
5575     }
5576 
5577     return Changed | AAValueSimplify::manifest(A);
5578   }
5579 
5580   /// See AbstractAttribute::trackStatistics()
5581   void trackStatistics() const override {
5582     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5583   }
5584 };
5585 
5586 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5587   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5588       : AAValueSimplifyImpl(IRP, A) {}
5589 
5590   /// See AbstractAttribute::initialize(...).
5591   void initialize(Attributor &A) override {
5592     AAValueSimplifyImpl::initialize(A);
5593     Value &V = getAnchorValue();
5594 
5595     // TODO: add other cases
5596     if (isa<Constant>(V))
5597       indicatePessimisticFixpoint();
5598   }
5599 
5600   /// Check if \p Cmp is a comparison we can simplify.
5601   ///
5602   /// We handle multiple cases, one in which at least one operand is an
5603   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5604   /// operand. Return true if successful; in that case SimplifiedAssociatedValue
5605   /// will be updated.
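       /// For illustration (hypothetical IR), given
       ///   %c = icmp eq i8* %p, null
       /// if AANonNull reports %p as (assumed) non-null, %c simplifies to the
       /// constant false (and to true for the `icmp ne` variant).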
5606   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5607     auto Union = [&](Value &V) {
5608       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5609           SimplifiedAssociatedValue, &V, V.getType());
5610       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5611     };
5612 
5613     Value *LHS = Cmp.getOperand(0);
5614     Value *RHS = Cmp.getOperand(1);
5615 
5616     // Simplify the operands first.
5617     bool UsedAssumedInformation = false;
5618     const auto &SimplifiedLHS =
5619         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5620                                *this, UsedAssumedInformation);
5621     if (!SimplifiedLHS.hasValue())
5622       return true;
5623     if (!SimplifiedLHS.getValue())
5624       return false;
5625     LHS = *SimplifiedLHS;
5626 
5627     const auto &SimplifiedRHS =
5628         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5629                                *this, UsedAssumedInformation);
5630     if (!SimplifiedRHS.hasValue())
5631       return true;
5632     if (!SimplifiedRHS.getValue())
5633       return false;
5634     RHS = *SimplifiedRHS;
5635 
5636     LLVMContext &Ctx = Cmp.getContext();
5637     // Handle the trivial case first in which we don't even need to think about
5638     // null or non-null.
5639     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5640       Constant *NewVal =
5641           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5642       if (!Union(*NewVal))
5643         return false;
5644       if (!UsedAssumedInformation)
5645         indicateOptimisticFixpoint();
5646       return true;
5647     }
5648 
5649     // From now on we only handle equalities (==, !=).
5650     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5651     if (!ICmp || !ICmp->isEquality())
5652       return false;
5653 
5654     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5655     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5656     if (!LHSIsNull && !RHSIsNull)
5657       return false;
5658 
5659     // We are left with the nullptr ==/!= non-nullptr case. We'll use
5660     // AANonNull on the non-nullptr operand; if we can assume it is non-null,
5661     // we can conclude the result of the comparison.
5662     assert((LHSIsNull || RHSIsNull) &&
5663            "Expected nullptr versus non-nullptr comparison at this point");
5664 
5665     // The index of the operand that we assume is not null.
5666     unsigned PtrIdx = LHSIsNull;
5667     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5668         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5669         DepClassTy::REQUIRED);
5670     if (!PtrNonNullAA.isAssumedNonNull())
5671       return false;
5672     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5673 
5674     // The new value depends on the predicate, true for != and false for ==.
5675     Constant *NewVal = ConstantInt::get(
5676         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5677     if (!Union(*NewVal))
5678       return false;
5679 
5680     if (!UsedAssumedInformation)
5681       indicateOptimisticFixpoint();
5682 
5683     return true;
5684   }
5685 
5686   bool updateWithLoad(Attributor &A, LoadInst &L) {
5687     auto Union = [&](Value &V) {
5688       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5689           SimplifiedAssociatedValue, &V, L.getType());
5690       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5691     };
5692     return handleLoad(A, *this, L, Union);
5693   }
5694 
5695   /// Use the generic, non-optimistic InstSimplify functionality if we managed
5696   /// to simplify any operand of the instruction \p I. Return true if
5697   /// successful; in that case SimplifiedAssociatedValue will be updated.
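       /// For illustration (hypothetical IR), if the operand %x of
       ///   %add = add i32 %x, 1
       /// is simplified to the constant 6, InstSimplify folds %add to 7.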
5698   bool handleGenericInst(Attributor &A, Instruction &I) {
5699     bool SomeSimplified = false;
5700     bool UsedAssumedInformation = false;
5701 
5702     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5703     int Idx = 0;
5704     for (Value *Op : I.operands()) {
5705       const auto &SimplifiedOp =
5706           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5707                                  *this, UsedAssumedInformation);
5708       // If we are not sure about any operand, we are not sure about the
5709       // entire instruction, so we'll wait.
5710       if (!SimplifiedOp.hasValue())
5711         return true;
5712 
5713       if (SimplifiedOp.getValue())
5714         NewOps[Idx] = SimplifiedOp.getValue();
5715       else
5716         NewOps[Idx] = Op;
5717 
5718       SomeSimplified |= (NewOps[Idx] != Op);
5719       ++Idx;
5720     }
5721 
5722     // We won't bother with the InstSimplify interface if we didn't simplify any
5723     // operand ourselves.
5724     if (!SomeSimplified)
5725       return false;
5726 
5727     InformationCache &InfoCache = A.getInfoCache();
5728     Function *F = I.getFunction();
5729     const auto *DT =
5730         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5731     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5732     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5733     OptimizationRemarkEmitter *ORE = nullptr;
5734 
5735     const DataLayout &DL = I.getModule()->getDataLayout();
5736     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5737     if (Value *SimplifiedI =
5738             SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5739       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5740           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5741       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5742     }
5743     return false;
5744   }
5745 
5746   /// See AbstractAttribute::updateImpl(...).
5747   ChangeStatus updateImpl(Attributor &A) override {
5748     auto Before = SimplifiedAssociatedValue;
5749 
5750     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5751                             bool Stripped) -> bool {
5752       auto &AA = A.getAAFor<AAValueSimplify>(
5753           *this, IRPosition::value(V, getCallBaseContext()),
5754           DepClassTy::REQUIRED);
5755       if (!Stripped && this == &AA) {
5756 
5757         if (auto *I = dyn_cast<Instruction>(&V)) {
5758           if (auto *LI = dyn_cast<LoadInst>(&V))
5759             if (updateWithLoad(A, *LI))
5760               return true;
5761           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5762             if (handleCmp(A, *Cmp))
5763               return true;
5764           if (handleGenericInst(A, *I))
5765             return true;
5766         }
5767         // TODO: Look at the instruction and check recursively.
5768 
5769         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5770                           << "\n");
5771         return false;
5772       }
5773       return checkAndUpdate(A, *this,
5774                             IRPosition::value(V, getCallBaseContext()));
5775     };
5776 
5777     bool Dummy = false;
5778     bool UsedAssumedInformation = false;
5779     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5780                                      VisitValueCB, getCtxI(),
5781                                      UsedAssumedInformation,
5782                                      /* UseValueSimplify */ false))
5783       if (!askSimplifiedValueForOtherAAs(A))
5784         return indicatePessimisticFixpoint();
5785 
5786     // If a candidate was found in this update, return CHANGED.
5787     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5788                                                : ChangeStatus::CHANGED;
5789   }
5790 
5791   /// See AbstractAttribute::trackStatistics()
5792   void trackStatistics() const override {
5793     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5794   }
5795 };
5796 
5797 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5798   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5799       : AAValueSimplifyImpl(IRP, A) {}
5800 
5801   /// See AbstractAttribute::initialize(...).
5802   void initialize(Attributor &A) override {
5803     SimplifiedAssociatedValue = nullptr;
5804     indicateOptimisticFixpoint();
5805   }
5806   /// See AbstractAttribute::initialize(...).
5807   ChangeStatus updateImpl(Attributor &A) override {
5808     llvm_unreachable(
5809         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5810   }
5811   /// See AbstractAttribute::trackStatistics()
5812   void trackStatistics() const override {
5813     STATS_DECLTRACK_FN_ATTR(value_simplify)
5814   }
5815 };
5816 
5817 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5818   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5819       : AAValueSimplifyFunction(IRP, A) {}
5820   /// See AbstractAttribute::trackStatistics()
5821   void trackStatistics() const override {
5822     STATS_DECLTRACK_CS_ATTR(value_simplify)
5823   }
5824 };
5825 
5826 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5827   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5828       : AAValueSimplifyImpl(IRP, A) {}
5829 
5830   void initialize(Attributor &A) override {
5831     AAValueSimplifyImpl::initialize(A);
5832     if (!getAssociatedFunction())
5833       indicatePessimisticFixpoint();
5834   }
5835 
5836   /// See AbstractAttribute::updateImpl(...).
5837   ChangeStatus updateImpl(Attributor &A) override {
5838     auto Before = SimplifiedAssociatedValue;
5839     auto &RetAA = A.getAAFor<AAReturnedValues>(
5840         *this, IRPosition::function(*getAssociatedFunction()),
5841         DepClassTy::REQUIRED);
5842     auto PredForReturned =
5843         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5844           bool UsedAssumedInformation = false;
5845           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5846               &RetVal, *cast<CallBase>(getCtxI()), *this,
5847               UsedAssumedInformation);
5848           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5849               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5850           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5851         };
5852     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5853       if (!askSimplifiedValueForOtherAAs(A))
5854         return indicatePessimisticFixpoint();
5855     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5856                                                : ChangeStatus::CHANGED;
5857   }
5858 
5859   void trackStatistics() const override {
5860     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5861   }
5862 };
5863 
5864 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5865   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5866       : AAValueSimplifyFloating(IRP, A) {}
5867 
5868   /// See AbstractAttribute::manifest(...).
5869   ChangeStatus manifest(Attributor &A) override {
5870     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5871 
5872     if (auto *NewV = getReplacementValue(A)) {
5873       Use &U = cast<CallBase>(&getAnchorValue())
5874                    ->getArgOperandUse(getCallSiteArgNo());
5875       if (A.changeUseAfterManifest(U, *NewV))
5876         Changed = ChangeStatus::CHANGED;
5877     }
5878 
5879     return Changed | AAValueSimplify::manifest(A);
5880   }
5881 
5882   void trackStatistics() const override {
5883     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5884   }
5885 };
5886 
5887 /// ----------------------- Heap-To-Stack Conversion ---------------------------
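     // A rough sketch of the transformation (hypothetical IR): a removable
     // allocation such as
     //   %p = call i8* @malloc(i64 8)
     //   ...
     //   call void @free(i8* %p)
     // becomes
     //   %p = alloca i8, i64 8
     //   ...
     // provided the size and alignment are known and small enough, no use
     // escapes, and every potential free of the allocation can be removed.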
5888 struct AAHeapToStackFunction final : public AAHeapToStack {
5889 
5890   struct AllocationInfo {
5891     /// The call that allocates the memory.
5892     CallBase *const CB;
5893 
5894     /// The library function id for the allocation.
5895     LibFunc LibraryFunctionId = NotLibFunc;
5896 
5897     /// The status w.r.t. a rewrite.
5898     enum {
5899       STACK_DUE_TO_USE,  // All uses are compatible with a stack allocation.
5900       STACK_DUE_TO_FREE, // Convertible only if the free check succeeds.
5901       INVALID,           // Cannot be converted into a stack allocation.
5902     } Status = STACK_DUE_TO_USE;
5903 
5904     /// Flag to indicate if we encountered a use that might free this allocation
5905     /// but which is not in the deallocation infos.
5906     bool HasPotentiallyFreeingUnknownUses = false;
5907 
5908     /// The set of free calls that use this allocation.
5909     SmallPtrSet<CallBase *, 1> PotentialFreeCalls{};
5910   };
5911 
5912   struct DeallocationInfo {
5913     /// The call that deallocates the memory.
5914     CallBase *const CB;
5915 
5916     /// Flag to indicate if we don't know all objects this deallocation might
5917     /// free.
5918     bool MightFreeUnknownObjects = false;
5919 
5920     /// The set of allocation calls that are potentially freed.
5921     SmallPtrSet<CallBase *, 1> PotentialAllocationCalls{};
5922   };
5923 
5924   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5925       : AAHeapToStack(IRP, A) {}
5926 
5927   ~AAHeapToStackFunction() {
5928     // Ensure we call the destructor so we release any memory allocated in the
5929     // sets.
5930     for (auto &It : AllocationInfos)
5931       It.getSecond()->~AllocationInfo();
5932     for (auto &It : DeallocationInfos)
5933       It.getSecond()->~DeallocationInfo();
5934   }
5935 
5936   void initialize(Attributor &A) override {
5937     AAHeapToStack::initialize(A);
5938 
5939     const Function *F = getAnchorScope();
5940     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5941 
5942     auto AllocationIdentifierCB = [&](Instruction &I) {
5943       CallBase *CB = dyn_cast<CallBase>(&I);
5944       if (!CB)
5945         return true;
5946       if (isFreeCall(CB, TLI)) {
5947         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
5948         return true;
5949       }
5950       // To do heap to stack, we need to know that the allocation itself is
5951       // removable once uses are rewritten, and that we can initialize the
5952       // alloca to the same pattern as the original allocation result.
5953       if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
5954         auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
5955         if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
5956           AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
5957           AllocationInfos[CB] = AI;
5958           TLI->getLibFunc(*CB, AI->LibraryFunctionId);
5959         }
5960       }
5961       return true;
5962     };
5963 
5964     bool UsedAssumedInformation = false;
5965     bool Success = A.checkForAllCallLikeInstructions(
5966         AllocationIdentifierCB, *this, UsedAssumedInformation,
5967         /* CheckBBLivenessOnly */ false,
5968         /* CheckPotentiallyDead */ true);
5969     (void)Success;
5970     assert(Success && "Did not expect the call base visit callback to fail!");
5971   }
5972 
5973   const std::string getAsStr() const override {
5974     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
5975     for (const auto &It : AllocationInfos) {
5976       if (It.second->Status == AllocationInfo::INVALID)
5977         ++NumInvalidMallocs;
5978       else
5979         ++NumH2SMallocs;
5980     }
5981     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
5982            std::to_string(NumInvalidMallocs);
5983   }
5984 
5985   /// See AbstractAttribute::trackStatistics().
5986   void trackStatistics() const override {
5987     STATS_DECL(
5988         MallocCalls, Function,
5989         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5990     for (auto &It : AllocationInfos)
5991       if (It.second->Status != AllocationInfo::INVALID)
5992         ++BUILD_STAT_NAME(MallocCalls, Function);
5993   }
5994 
5995   bool isAssumedHeapToStack(const CallBase &CB) const override {
5996     if (isValidState())
5997       if (AllocationInfo *AI = AllocationInfos.lookup(&CB))
5998         return AI->Status != AllocationInfo::INVALID;
5999     return false;
6000   }
6001 
6002   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
6003     if (!isValidState())
6004       return false;
6005 
6006     for (auto &It : AllocationInfos) {
6007       AllocationInfo &AI = *It.second;
6008       if (AI.Status == AllocationInfo::INVALID)
6009         continue;
6010 
6011       if (AI.PotentialFreeCalls.count(&CB))
6012         return true;
6013     }
6014 
6015     return false;
6016   }
6017 
6018   ChangeStatus manifest(Attributor &A) override {
6019     assert(getState().isValidState() &&
6020            "Attempted to manifest an invalid state!");
6021 
6022     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
6023     Function *F = getAnchorScope();
6024     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6025 
6026     for (auto &It : AllocationInfos) {
6027       AllocationInfo &AI = *It.second;
6028       if (AI.Status == AllocationInfo::INVALID)
6029         continue;
6030 
6031       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
6032         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
6033         A.deleteAfterManifest(*FreeCall);
6034         HasChanged = ChangeStatus::CHANGED;
6035       }
6036 
6037       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
6038                         << "\n");
6039 
6040       auto Remark = [&](OptimizationRemark OR) {
6041         LibFunc IsAllocShared;
6042         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
6043           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
6044             return OR << "Moving globalized variable to the stack.";
6045         return OR << "Moving memory allocation from the heap to the stack.";
6046       };
6047       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6048         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
6049       else
6050         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
6051 
6052       const DataLayout &DL = A.getInfoCache().getDL();
6053       Value *Size;
6054       Optional<APInt> SizeAPI = getSize(A, *this, AI);
6055       if (SizeAPI.hasValue()) {
6056         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
6057       } else {
6058         LLVMContext &Ctx = AI.CB->getContext();
6059         ObjectSizeOpts Opts;
6060         ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
6061         SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
6062         assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
6063                cast<ConstantInt>(SizeOffsetPair.second)->isZero());
6064         Size = SizeOffsetPair.first;
6065       }
6066 
6067       Align Alignment(1);
6068       if (MaybeAlign RetAlign = AI.CB->getRetAlign())
6069         Alignment = max(Alignment, RetAlign);
6070       if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6071         Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
6072         assert(AlignmentAPI.hasValue() &&
6073                "Expected an alignment during manifest!");
6074         Alignment =
6075             max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
6076       }
6077 
6078       // TODO: Hoist the alloca towards the function entry.
6079       unsigned AS = DL.getAllocaAddrSpace();
6080       Instruction *Alloca = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
6081                                            Size, Alignment, "", AI.CB);
6082 
6083       if (Alloca->getType() != AI.CB->getType())
6084         Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6085             Alloca, AI.CB->getType(), "malloc_cast", AI.CB);
6086 
6087       auto *I8Ty = Type::getInt8Ty(F->getContext());
6088       auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
6089       assert(InitVal &&
6090              "Must be able to materialize initial memory state of allocation");
6091 
6092       A.changeValueAfterManifest(*AI.CB, *Alloca);
6093 
6094       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
6095         auto *NBB = II->getNormalDest();
6096         BranchInst::Create(NBB, AI.CB->getParent());
6097         A.deleteAfterManifest(*AI.CB);
6098       } else {
6099         A.deleteAfterManifest(*AI.CB);
6100       }
6101 
6102       // Initialize the alloca with the same value as used by the allocation
6103       // function. We can skip undef as the initial value of an alloca is
6104       // undef, and the memset would simply end up being DSEd.
6105       if (!isa<UndefValue>(InitVal)) {
6106         IRBuilder<> Builder(Alloca->getNextNode());
6107         // TODO: Use alignment above if align!=1
6108         Builder.CreateMemSet(Alloca, InitVal, Size, None);
6109       }
6110       HasChanged = ChangeStatus::CHANGED;
6111     }
6112 
6113     return HasChanged;
6114   }
6115 
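       /// Return the constant value of \p V as an APInt, or None if \p V is
       /// known not to simplify to a constant integer. While simplification is
       /// still pending, 0 is returned as the optimistic placeholder.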
6116   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6117                            Value &V) {
6118     bool UsedAssumedInformation = false;
6119     Optional<Constant *> SimpleV =
6120         A.getAssumedConstant(V, AA, UsedAssumedInformation);
6121     if (!SimpleV.hasValue())
6122       return APInt(64, 0);
6123     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
6124       return CI->getValue();
6125     return llvm::None;
6126   }
6127 
6128   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6129                           AllocationInfo &AI) {
6130     auto Mapper = [&](const Value *V) -> const Value * {
6131       bool UsedAssumedInformation = false;
6132       if (Optional<Constant *> SimpleV =
6133               A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6134         if (*SimpleV)
6135           return *SimpleV;
6136       return V;
6137     };
6138 
6139     const Function *F = getAnchorScope();
6140     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6141     return getAllocSize(AI.CB, TLI, Mapper);
6142   }
6143 
6144   /// Collection of all malloc-like calls in a function with associated
6145   /// information.
6146   DenseMap<CallBase *, AllocationInfo *> AllocationInfos;
6147 
6148   /// Collection of all free-like calls in a function with associated
6149   /// information.
6150   DenseMap<CallBase *, DeallocationInfo *> DeallocationInfos;
6151 
6152   ChangeStatus updateImpl(Attributor &A) override;
6153 };
6154 
6155 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6156   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6157   const Function *F = getAnchorScope();
6158   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6159 
6160   const auto &LivenessAA =
6161       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6162 
6163   MustBeExecutedContextExplorer &Explorer =
6164       A.getInfoCache().getMustBeExecutedContextExplorer();
6165 
6166   bool StackIsAccessibleByOtherThreads =
6167       A.getInfoCache().stackIsAccessibleByOtherThreads();
6168 
6169   // Flag to ensure we update our deallocation information at most once per
6170   // updateImpl call and only if we use the free check reasoning.
6171   bool HasUpdatedFrees = false;
6172 
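       // For every known deallocation call, (re)compute the set of allocation
       // calls it might free by looking at the assumed underlying objects of
       // the freed pointer; unknown objects conservatively invalidate the
       // entry.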
6173   auto UpdateFrees = [&]() {
6174     HasUpdatedFrees = true;
6175 
6176     for (auto &It : DeallocationInfos) {
6177       DeallocationInfo &DI = *It.second;
6178       // For now we cannot use deallocations that have unknown inputs, skip
6179       // them.
6180       if (DI.MightFreeUnknownObjects)
6181         continue;
6182 
6183       // No need to analyze dead calls, ignore them instead.
6184       bool UsedAssumedInformation = false;
6185       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6186                           /* CheckBBLivenessOnly */ true))
6187         continue;
6188 
6189       // Use the optimistic version to get the freed objects, ignoring dead
6190       // branches etc.
6191       SmallVector<Value *, 8> Objects;
6192       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6193                                            *this, DI.CB,
6194                                            UsedAssumedInformation)) {
6195         LLVM_DEBUG(
6196             dbgs()
6197             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6198         DI.MightFreeUnknownObjects = true;
6199         continue;
6200       }
6201 
6202       // Check each object explicitly.
6203       for (auto *Obj : Objects) {
6204         // Free of null and undef can be ignored as no-ops (or UB in the latter
6205         // case).
6206         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6207           continue;
6208 
6209         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6210         if (!ObjCB) {
6211           LLVM_DEBUG(dbgs()
6212                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6213           DI.MightFreeUnknownObjects = true;
6214           continue;
6215         }
6216 
6217         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6218         if (!AI) {
6219           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6220                             << "\n");
6221           DI.MightFreeUnknownObjects = true;
6222           continue;
6223         }
6224 
6225         DI.PotentialAllocationCalls.insert(ObjCB);
6226       }
6227     }
6228   };
6229 
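       // Be conservative: the conversion via the free check is only allowed if
       // there is a single free call that is known to free exactly this
       // allocation and that is always executed whenever the allocation is.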
6230   auto FreeCheck = [&](AllocationInfo &AI) {
6231     // If the stack is not accessible by other threads, the "must-free" logic
6232     // doesn't apply as the pointer could be shared and needs to be placed in
6233     // "shareable" memory.
6234     if (!StackIsAccessibleByOtherThreads) {
6235       auto &NoSyncAA =
6236           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6237       if (!NoSyncAA.isAssumedNoSync()) {
6238         LLVM_DEBUG(
6239             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6240                       "other threads and function is not nosync:\n");
6241         return false;
6242       }
6243     }
6244     if (!HasUpdatedFrees)
6245       UpdateFrees();
6246 
6247     // TODO: Allow multi-exit functions that have different free calls.
6248     if (AI.PotentialFreeCalls.size() != 1) {
6249       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6250                         << AI.PotentialFreeCalls.size() << "\n");
6251       return false;
6252     }
6253     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6254     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6255     if (!DI) {
6256       LLVM_DEBUG(
6257           dbgs() << "[H2S] unique free call was not known as deallocation call "
6258                  << *UniqueFree << "\n");
6259       return false;
6260     }
6261     if (DI->MightFreeUnknownObjects) {
6262       LLVM_DEBUG(
6263           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6264       return false;
6265     }
6266     if (DI->PotentialAllocationCalls.size() > 1) {
6267       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6268                         << DI->PotentialAllocationCalls.size()
6269                         << " different allocations\n");
6270       return false;
6271     }
6272     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6273       LLVM_DEBUG(
6274           dbgs()
6275           << "[H2S] unique free call not known to free this allocation but "
6276           << **DI->PotentialAllocationCalls.begin() << "\n");
6277       return false;
6278     }
6279     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6280     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6281       LLVM_DEBUG(
6282           dbgs()
6283           << "[H2S] unique free call might not be executed with the allocation "
6284           << *UniqueFree << "\n");
6285       return false;
6286     }
6287     return true;
6288   };
6289 
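       // Walk all transitive uses of the allocation: loads and stores into the
       // memory are fine, known free calls are recorded, and passing the
       // pointer to another call is only acceptable if the callee neither
       // captures nor (for non-__kmpc_alloc_shared allocations) frees it.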
6290   auto UsesCheck = [&](AllocationInfo &AI) {
6291     bool ValidUsesOnly = true;
6292 
6293     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6294       Instruction *UserI = cast<Instruction>(U.getUser());
6295       if (isa<LoadInst>(UserI))
6296         return true;
6297       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6298         if (SI->getValueOperand() == U.get()) {
6299           LLVM_DEBUG(dbgs()
6300                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6301           ValidUsesOnly = false;
6302         } else {
6303           // A store into the malloc'ed memory is fine.
6304         }
6305         return true;
6306       }
6307       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6308         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6309           return true;
6310         if (DeallocationInfos.count(CB)) {
6311           AI.PotentialFreeCalls.insert(CB);
6312           return true;
6313         }
6314 
6315         unsigned ArgNo = CB->getArgOperandNo(&U);
6316 
6317         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6318             *this, IRPosition::callsite_argument(*CB, ArgNo),
6319             DepClassTy::OPTIONAL);
6320 
6321         // If a call site argument use is nofree, we are fine.
6322         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6323             *this, IRPosition::callsite_argument(*CB, ArgNo),
6324             DepClassTy::OPTIONAL);
6325 
6326         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6327         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6328         if (MaybeCaptured ||
6329             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6330              MaybeFreed)) {
6331           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6332 
6333           // Emit a missed remark if this is missed OpenMP globalization.
6334           auto Remark = [&](OptimizationRemarkMissed ORM) {
6335             return ORM
6336                    << "Could not move globalized variable to the stack. "
6337                       "Variable is potentially captured in call. Mark "
6338                       "parameter as `__attribute__((noescape))` to override.";
6339           };
6340 
6341           if (ValidUsesOnly &&
6342               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6343             A.emitRemark<OptimizationRemarkMissed>(AI.CB, "OMP113", Remark);
6344 
6345           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6346           ValidUsesOnly = false;
6347         }
6348         return true;
6349       }
6350 
6351       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6352           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6353         Follow = true;
6354         return true;
6355       }
6356       // Unknown user for which we cannot track uses further (in a way that
6357       // makes sense).
6358       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6359       ValidUsesOnly = false;
6360       return true;
6361     };
6362     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6363       return false;
6364     return ValidUsesOnly;
6365   };
6366 
6367   // The actual update starts here. We look at all allocations and depending on
6368   // their status perform the appropriate check(s).
6369   for (auto &It : AllocationInfos) {
6370     AllocationInfo &AI = *It.second;
6371     if (AI.Status == AllocationInfo::INVALID)
6372       continue;
6373 
6374     if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6375       if (!getAPInt(A, *this, *Align)) {
6376         // Can't generate an alloca which respects the required alignment
6377         // on the allocation.
6378         LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6379                           << "\n");
6380         AI.Status = AllocationInfo::INVALID;
6381         Changed = ChangeStatus::CHANGED;
6382         continue;
6383       }
6384     }
6385 
6386     if (MaxHeapToStackSize != -1) {
6387       Optional<APInt> Size = getSize(A, *this, AI);
6388       if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
6389         LLVM_DEBUG({
6390           if (!Size.hasValue())
6391             dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
6392           else
6393             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6394                    << MaxHeapToStackSize << "\n";
6395         });
6396 
6397         AI.Status = AllocationInfo::INVALID;
6398         Changed = ChangeStatus::CHANGED;
6399         continue;
6400       }
6401     }
6402 
6403     switch (AI.Status) {
6404     case AllocationInfo::STACK_DUE_TO_USE:
6405       if (UsesCheck(AI))
6406         continue;
6407       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6408       LLVM_FALLTHROUGH;
6409     case AllocationInfo::STACK_DUE_TO_FREE:
6410       if (FreeCheck(AI))
6411         continue;
6412       AI.Status = AllocationInfo::INVALID;
6413       Changed = ChangeStatus::CHANGED;
6414       continue;
6415     case AllocationInfo::INVALID:
6416       llvm_unreachable("Invalid allocations should never reach this point!");
6417     }
6418   }
6419 
6420   return Changed;
6421 }
6422 
6423 /// ----------------------- Privatizable Pointers ------------------------------
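     // A rough sketch of the idea (hypothetical IR): a pointer argument as in
     //   define void @fn(i32* byval(i32) %p)
     // can be privatized by rewriting the signature to pass the pointee value
     // itself and materializing a private copy (an alloca initialized from the
     // passed value) in the callee, assuming all call sites agree on the type.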
6424 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6425   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6426       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6427 
6428   ChangeStatus indicatePessimisticFixpoint() override {
6429     AAPrivatizablePtr::indicatePessimisticFixpoint();
6430     PrivatizableType = nullptr;
6431     return ChangeStatus::CHANGED;
6432   }
6433 
6434   /// Identify the type we can choose for a private copy of the underlying
6435   /// argument. None means it is not clear yet; nullptr means there is none.
6436   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6437 
6438   /// Return a privatizable type that encloses both T0 and T1.
6439   /// TODO: This is merely a stub for now as we should manage a mapping as well.
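       /// For illustration: combining <none> with i32 gives i32, i32 with i32
       /// stays i32, and i32 with i64 gives nullptr (no common type).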
6440   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6441     if (!T0.hasValue())
6442       return T1;
6443     if (!T1.hasValue())
6444       return T0;
6445     if (T0 == T1)
6446       return T0;
6447     return nullptr;
6448   }
6449 
6450   Optional<Type *> getPrivatizableType() const override {
6451     return PrivatizableType;
6452   }
6453 
6454   const std::string getAsStr() const override {
6455     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6456   }
6457 
6458 protected:
6459   Optional<Type *> PrivatizableType;
6460 };
6461 
6462 // TODO: Do this for call site arguments (probably also other values) as well.
6463 
6464 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6465   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6466       : AAPrivatizablePtrImpl(IRP, A) {}
6467 
6468   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6469   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6470     // If this is a byval argument and we know all the call sites (so we can
6471     // rewrite them), there is no need to check them explicitly.
6472     bool UsedAssumedInformation = false;
6473     if (getIRPosition().hasAttr(Attribute::ByVal) &&
6474         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6475                                true, UsedAssumedInformation))
6476       return getAssociatedValue().getType()->getPointerElementType();
6477 
6478     Optional<Type *> Ty;
6479     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6480 
6481     // Make sure the associated call site argument has the same type at all call
6482     // sites and it is an allocation we know is safe to privatize; for now that
6483     // means we only allow alloca instructions.
6484     // TODO: We can additionally analyze the accesses in the callee to create
6485     //       the type from that information instead. That is a little more
6486     //       involved and will be done in a follow-up patch.
6487     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6488       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6489       // Check if a corresponding argument was found or if it is not
6490       // associated (which can happen for callback calls).
6491       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6492         return false;
6493 
6494       // Check that all call sites agree on a type.
6495       auto &PrivCSArgAA =
6496           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6497       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6498 
6499       LLVM_DEBUG({
6500         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6501         if (CSTy.hasValue() && CSTy.getValue())
6502           CSTy.getValue()->print(dbgs());
6503         else if (CSTy.hasValue())
6504           dbgs() << "<nullptr>";
6505         else
6506           dbgs() << "<none>";
6507       });
6508 
6509       Ty = combineTypes(Ty, CSTy);
6510 
6511       LLVM_DEBUG({
6512         dbgs() << " : New Type: ";
6513         if (Ty.hasValue() && Ty.getValue())
6514           Ty.getValue()->print(dbgs());
6515         else if (Ty.hasValue())
6516           dbgs() << "<nullptr>";
6517         else
6518           dbgs() << "<none>";
6519         dbgs() << "\n";
6520       });
6521 
6522       return !Ty.hasValue() || Ty.getValue();
6523     };
6524 
6525     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6526                                 UsedAssumedInformation))
6527       return nullptr;
6528     return Ty;
6529   }
6530 
6531   /// See AbstractAttribute::updateImpl(...).
6532   ChangeStatus updateImpl(Attributor &A) override {
6533     PrivatizableType = identifyPrivatizableType(A);
6534     if (!PrivatizableType.hasValue())
6535       return ChangeStatus::UNCHANGED;
6536     if (!PrivatizableType.getValue())
6537       return indicatePessimisticFixpoint();
6538 
6539     // The dependence is optional so we don't give up once we give up on the
6540     // alignment.
6541     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6542                         DepClassTy::OPTIONAL);
6543 
6544     // Avoid arguments with padding for now.
6545     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6546         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
6547                                                 A.getInfoCache().getDL())) {
6548       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6549       return indicatePessimisticFixpoint();
6550     }
6551 
6552     // Collect the types that will replace the privatizable type in the function
6553     // signature.
6554     SmallVector<Type *, 16> ReplacementTypes;
6555     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6556 
6557     // Verify callee and caller agree on how the promoted argument would be
6558     // passed.
6559     Function &Fn = *getIRPosition().getAnchorScope();
6560     const auto *TTI =
6561         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6562     if (!TTI) {
6563       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6564                         << Fn.getName() << "\n");
6565       return indicatePessimisticFixpoint();
6566     }
6567 
6568     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6569       CallBase *CB = ACS.getInstruction();
6570       return TTI->areTypesABICompatible(
6571           CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6572     };
6573     bool UsedAssumedInformation = false;
6574     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6575                                 UsedAssumedInformation)) {
6576       LLVM_DEBUG(
6577           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6578                  << Fn.getName() << "\n");
6579       return indicatePessimisticFixpoint();
6580     }
6581 
6582     // Register a rewrite of the argument.
6583     Argument *Arg = getAssociatedArgument();
6584     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6585       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6586       return indicatePessimisticFixpoint();
6587     }
6588 
6589     unsigned ArgNo = Arg->getArgNo();
6590 
6591     // Helper to check if, for the given call site, the associated argument is
6592     // passed to a callback where the privatization would be different.
6593     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6594       SmallVector<const Use *, 4> CallbackUses;
6595       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6596       for (const Use *U : CallbackUses) {
6597         AbstractCallSite CBACS(U);
6598         assert(CBACS && CBACS.isCallbackCall());
6599         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6600           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6601 
6602           LLVM_DEBUG({
6603             dbgs()
6604                 << "[AAPrivatizablePtr] Argument " << *Arg
6605                 << " check if it can be privatized in the context of its parent ("
6606                 << Arg->getParent()->getName()
6607                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6608                    "callback ("
6609                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6610                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6611                 << CBACS.getCallArgOperand(CBArg) << " vs "
6612                 << CB.getArgOperand(ArgNo) << "\n"
6613                 << "[AAPrivatizablePtr] " << CBArg << " : "
6614                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6615           });
6616 
6617           if (CBArgNo != int(ArgNo))
6618             continue;
6619           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6620               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6621           if (CBArgPrivAA.isValidState()) {
6622             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6623             if (!CBArgPrivTy.hasValue())
6624               continue;
6625             if (CBArgPrivTy.getValue() == PrivatizableType)
6626               continue;
6627           }
6628 
6629           LLVM_DEBUG({
6630             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6631                    << " cannot be privatized in the context of its parent ("
6632                    << Arg->getParent()->getName()
6633                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6634                       "callback ("
6635                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6636                    << ").\n[AAPrivatizablePtr] for which the argument "
6637                       "privatization is not compatible.\n";
6638           });
6639           return false;
6640         }
6641       }
6642       return true;
6643     };
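    // Illustrative sketch (names hypothetical, not from this pass): given a
    // broker call with callback metadata such as
    //   call void @broker(void (i8*)* @callee, i8* %payload)  ; !callback
    // where %payload flows into @callee's callback argument, the helper above
    // only accepts the call site if that callback argument is itself assumed
    // privatizable to the same type (or its type is still unknown).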
6644 
6645     // Helper to check if, for the given call site, the associated argument is
6646     // passed to a direct call where the privatization would be different.
6647     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6648       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6649       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6650       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6651              "Expected a direct call operand for callback call operand");
6652 
6653       LLVM_DEBUG({
6654         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6655                << " check if it can be privatized in the context of its parent ("
6656                << Arg->getParent()->getName()
6657                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6658                   "direct call of ("
6659                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6660                << ").\n";
6661       });
6662 
6663       Function *DCCallee = DC->getCalledFunction();
6664       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6665         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6666             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6667             DepClassTy::REQUIRED);
6668         if (DCArgPrivAA.isValidState()) {
6669           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6670           if (!DCArgPrivTy.hasValue())
6671             return true;
6672           if (DCArgPrivTy.getValue() == PrivatizableType)
6673             return true;
6674         }
6675       }
6676 
6677       LLVM_DEBUG({
6678         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6679                << " cannot be privatized in the context of its parent ("
6680                << Arg->getParent()->getName()
6681                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6682                   "direct call of ("
6683                << ACS.getInstruction()->getCalledFunction()->getName()
6684                << ").\n[AAPrivatizablePtr] for which the argument "
6685                   "privatization is not compatible.\n";
6686       });
6687       return false;
6688     };
6689 
6690     // Helper to check if the associated argument is used at the given abstract
6691     // call site in a way that is incompatible with the privatization assumed
6692     // here.
6693     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6694       if (ACS.isDirectCall())
6695         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6696       if (ACS.isCallbackCall())
6697         return IsCompatiblePrivArgOfDirectCS(ACS);
6698       return false;
6699     };
6700 
6701     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6702                                 UsedAssumedInformation))
6703       return indicatePessimisticFixpoint();
6704 
6705     return ChangeStatus::UNCHANGED;
6706   }
6707 
6708   /// Given a type to privatize, \p PrivType, collect the constituents (which
6709   /// are used) in \p ReplacementTypes.
6710   static void
6711   identifyReplacementTypes(Type *PrivType,
6712                            SmallVectorImpl<Type *> &ReplacementTypes) {
6713     // TODO: For now we expand the privatization type to the fullest which can
6714     //       lead to dead arguments that need to be removed later.
6715     assert(PrivType && "Expected privatizable type!");
6716 
6717     // Traverse the type, extract constituent types on the outermost level.
6718     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6719       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6720         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6721     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6722       ReplacementTypes.append(PrivArrayType->getNumElements(),
6723                               PrivArrayType->getElementType());
6724     } else {
6725       ReplacementTypes.push_back(PrivType);
6726     }
6727   }
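  // For example (illustrative only): a privatizable type { i32, i64 } yields
  // the replacement types i32 and i64, [4 x float] yields four float entries,
  // and any other type is passed through unchanged. Only the outermost level
  // is expanded, so { i32, [2 x i64] } yields i32 and [2 x i64].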
6728 
6729   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6730   /// The values needed are taken from the arguments of \p F starting at
6731   /// position \p ArgNo.
6732   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6733                                    unsigned ArgNo, Instruction &IP) {
6734     assert(PrivType && "Expected privatizable type!");
6735 
6736     IRBuilder<NoFolder> IRB(&IP);
6737     const DataLayout &DL = F.getParent()->getDataLayout();
6738 
6739     // Traverse the type, build GEPs and stores.
6740     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6741       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6742       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6743         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6744         Value *Ptr =
6745             constructPointer(PointeeTy, PrivType, &Base,
6746                              PrivStructLayout->getElementOffset(u), IRB, DL);
6747         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6748       }
6749     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6750       Type *PointeeTy = PrivArrayType->getElementType();
6751       Type *PointeePtrTy = PointeeTy->getPointerTo();
6752       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6753       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6754         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6755                                       u * PointeeTySize, IRB, DL);
6756         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6757       }
6758     } else {
6759       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6760     }
6761   }
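  // Sketch of the IR this emits (illustrative, assuming PrivType is
  // { i32, i64 } and the replacement arguments start at ArgNo):
  //   %p0 = <GEP to struct-layout offset 0 of Base>
  //   store i32 %arg0, i32* %p0
  //   %p1 = <GEP to struct-layout offset of element 1 of Base>
  //   store i64 %arg1, i64* %p1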
6762 
6763   /// Extract values from \p Base according to the type \p PrivType at the
6764   /// call position \p ACS. The values are appended to \p ReplacementValues.
6765   void createReplacementValues(Align Alignment, Type *PrivType,
6766                                AbstractCallSite ACS, Value *Base,
6767                                SmallVectorImpl<Value *> &ReplacementValues) {
6768     assert(Base && "Expected base value!");
6769     assert(PrivType && "Expected privatizable type!");
6770     Instruction *IP = ACS.getInstruction();
6771 
6772     IRBuilder<NoFolder> IRB(IP);
6773     const DataLayout &DL = IP->getModule()->getDataLayout();
6774 
6775     Type *PrivPtrType = PrivType->getPointerTo();
6776     if (Base->getType() != PrivPtrType)
6777       Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6778           Base, PrivPtrType, "", ACS.getInstruction());
6779 
6780     // Traverse the type, build GEPs and loads.
6781     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6782       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6783       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6784         Type *PointeeTy = PrivStructType->getElementType(u);
6785         Value *Ptr =
6786             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6787                              PrivStructLayout->getElementOffset(u), IRB, DL);
6788         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6789         L->setAlignment(Alignment);
6790         ReplacementValues.push_back(L);
6791       }
6792     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6793       Type *PointeeTy = PrivArrayType->getElementType();
6794       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6795       Type *PointeePtrTy = PointeeTy->getPointerTo();
6796       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6797         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6798                                       u * PointeeTySize, IRB, DL);
6799         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6800         L->setAlignment(Alignment);
6801         ReplacementValues.push_back(L);
6802       }
6803     } else {
6804       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6805       L->setAlignment(Alignment);
6806       ReplacementValues.push_back(L);
6807     }
6808   }
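  // Sketch of the IR this emits before the call (illustrative, for a
  // privatizable type { i32, i64 } reached through Base):
  //   %v0 = load i32, i32* <GEP to element 0 of Base>, align <Alignment>
  //   %v1 = load i64, i64* <GEP to element 1 of Base>, align <Alignment>
  // The loaded values then replace the pointer argument in the rewritten call.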
6809 
6810   /// See AbstractAttribute::manifest(...)
6811   ChangeStatus manifest(Attributor &A) override {
6812     if (!PrivatizableType.hasValue())
6813       return ChangeStatus::UNCHANGED;
6814     assert(PrivatizableType.getValue() && "Expected privatizable type!");
6815 
6816     // Collect all tail calls in the function as we cannot allow new allocas to
6817     // escape into tail recursion.
6818     // TODO: Be smarter about new allocas escaping into tail calls.
6819     SmallVector<CallInst *, 16> TailCalls;
6820     bool UsedAssumedInformation = false;
6821     if (!A.checkForAllInstructions(
6822             [&](Instruction &I) {
6823               CallInst &CI = cast<CallInst>(I);
6824               if (CI.isTailCall())
6825                 TailCalls.push_back(&CI);
6826               return true;
6827             },
6828             *this, {Instruction::Call}, UsedAssumedInformation))
6829       return ChangeStatus::UNCHANGED;
6830 
6831     Argument *Arg = getAssociatedArgument();
6832     // Query AAAlign attribute for alignment of associated argument to
6833     // determine the best alignment of loads.
6834     const auto &AlignAA =
6835         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
6836 
6837     // Callback to repair the associated function. A new alloca is placed at the
6838     // beginning and initialized with the values passed through arguments. The
6839     // new alloca replaces all uses of the old pointer argument.
6840     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
6841         [=](const Attributor::ArgumentReplacementInfo &ARI,
6842             Function &ReplacementFn, Function::arg_iterator ArgIt) {
6843           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
6844           Instruction *IP = &*EntryBB.getFirstInsertionPt();
6845           const DataLayout &DL = IP->getModule()->getDataLayout();
6846           unsigned AS = DL.getAllocaAddrSpace();
6847           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS,
6848                                            Arg->getName() + ".priv", IP);
6849           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
6850                                ArgIt->getArgNo(), *IP);
6851 
6852           if (AI->getType() != Arg->getType())
6853             AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6854                 AI, Arg->getType(), "", IP);
6855           Arg->replaceAllUsesWith(AI);
6856 
6857           for (CallInst *CI : TailCalls)
6858             CI->setTailCall(false);
6859         };
6860 
6861     // Callback to repair a call site of the associated function. The elements
6862     // of the privatizable type are loaded prior to the call and passed to the
6863     // new function version.
6864     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
6865         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
6866                       AbstractCallSite ACS,
6867                       SmallVectorImpl<Value *> &NewArgOperands) {
6868           // When no alignment is specified for the load instruction,
6869           // natural alignment is assumed.
6870           createReplacementValues(
6871               assumeAligned(AlignAA.getAssumedAlign()),
6872               PrivatizableType.getValue(), ACS,
6873               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
6874               NewArgOperands);
6875         };
6876 
6877     // Collect the types that will replace the privatizable type in the function
6878     // signature.
6879     SmallVector<Type *, 16> ReplacementTypes;
6880     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6881 
6882     // Register a rewrite of the argument.
6883     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
6884                                            std::move(FnRepairCB),
6885                                            std::move(ACSRepairCB)))
6886       return ChangeStatus::CHANGED;
6887     return ChangeStatus::UNCHANGED;
6888   }
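  // End-to-end sketch of the rewrite (illustrative only, for an i32 argument):
  //   before: define void @fn(i32* %p)        call void @fn(i32* %q)
  //   after:  define void @fn(i32 %p.0)       %v = load i32, i32* %q
  //             %p.priv = alloca i32          call void @fn(i32 %v)
  //             store i32 %p.0, i32* %p.priv
  //             ; uses of %p replaced by %p.priv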
6889 
6890   /// See AbstractAttribute::trackStatistics()
6891   void trackStatistics() const override {
6892     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
6893   }
6894 };
6895 
6896 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
6897   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
6898       : AAPrivatizablePtrImpl(IRP, A) {}
6899 
6900   /// See AbstractAttribute::initialize(...).
6901   virtual void initialize(Attributor &A) override {
6902     // TODO: We can privatize more than arguments.
6903     indicatePessimisticFixpoint();
6904   }
6905 
6906   ChangeStatus updateImpl(Attributor &A) override {
6907     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
6908                      "updateImpl will not be called");
6909   }
6910 
6911   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6912   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6913     Value *Obj = getUnderlyingObject(&getAssociatedValue());
6914     if (!Obj) {
6915       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
6916       return nullptr;
6917     }
6918 
6919     if (auto *AI = dyn_cast<AllocaInst>(Obj))
6920       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
6921         if (CI->isOne())
6922           return AI->getAllocatedType();
6923     if (auto *Arg = dyn_cast<Argument>(Obj)) {
6924       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
6925           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
6926       if (PrivArgAA.isAssumedPrivatizablePtr())
6927         return Obj->getType()->getPointerElementType();
6928     }
6929 
6930     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
6931                          "alloca nor privatizable argument: "
6932                       << *Obj << "!\n");
6933     return nullptr;
6934   }
6935 
6936   /// See AbstractAttribute::trackStatistics()
6937   void trackStatistics() const override {
6938     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
6939   }
6940 };
6941 
6942 struct AAPrivatizablePtrCallSiteArgument final
6943     : public AAPrivatizablePtrFloating {
6944   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
6945       : AAPrivatizablePtrFloating(IRP, A) {}
6946 
6947   /// See AbstractAttribute::initialize(...).
6948   void initialize(Attributor &A) override {
6949     if (getIRPosition().hasAttr(Attribute::ByVal))
6950       indicateOptimisticFixpoint();
6951   }
6952 
6953   /// See AbstractAttribute::updateImpl(...).
6954   ChangeStatus updateImpl(Attributor &A) override {
6955     PrivatizableType = identifyPrivatizableType(A);
6956     if (!PrivatizableType.hasValue())
6957       return ChangeStatus::UNCHANGED;
6958     if (!PrivatizableType.getValue())
6959       return indicatePessimisticFixpoint();
6960 
6961     const IRPosition &IRP = getIRPosition();
6962     auto &NoCaptureAA =
6963         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
6964     if (!NoCaptureAA.isAssumedNoCapture()) {
6965       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
6966       return indicatePessimisticFixpoint();
6967     }
6968 
6969     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
6970     if (!NoAliasAA.isAssumedNoAlias()) {
6971       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
6972       return indicatePessimisticFixpoint();
6973     }
6974 
6975     bool IsKnown;
6976     if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
6977       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
6978       return indicatePessimisticFixpoint();
6979     }
6980 
6981     return ChangeStatus::UNCHANGED;
6982   }
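  // In short (illustrative): a call-site pointer argument stays privatizable
  // only if it is assumed nocapture, noalias, and readonly at the call site,
  // e.g., passing the address of a local i32 that the callee merely reads
  // satisfies all three checks above.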
6983 
6984   /// See AbstractAttribute::trackStatistics()
6985   void trackStatistics() const override {
6986     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
6987   }
6988 };
6989 
6990 struct AAPrivatizablePtrCallSiteReturned final
6991     : public AAPrivatizablePtrFloating {
6992   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
6993       : AAPrivatizablePtrFloating(IRP, A) {}
6994 
6995   /// See AbstractAttribute::initialize(...).
6996   void initialize(Attributor &A) override {
6997     // TODO: We can privatize more than arguments.
6998     indicatePessimisticFixpoint();
6999   }
7000 
7001   /// See AbstractAttribute::trackStatistics()
7002   void trackStatistics() const override {
7003     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
7004   }
7005 };
7006 
7007 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
7008   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
7009       : AAPrivatizablePtrFloating(IRP, A) {}
7010 
7011   /// See AbstractAttribute::initialize(...).
7012   void initialize(Attributor &A) override {
7013     // TODO: We can privatize more than arguments.
7014     indicatePessimisticFixpoint();
7015   }
7016 
7017   /// See AbstractAttribute::trackStatistics()
7018   void trackStatistics() const override {
7019     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
7020   }
7021 };
7022 
7023 /// -------------------- Memory Behavior Attributes ----------------------------
7024 /// Includes read-none, read-only, and write-only.
7025 /// ----------------------------------------------------------------------------
7026 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
7027   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
7028       : AAMemoryBehavior(IRP, A) {}
7029 
7030   /// See AbstractAttribute::initialize(...).
7031   void initialize(Attributor &A) override {
7032     intersectAssumedBits(BEST_STATE);
7033     getKnownStateFromValue(getIRPosition(), getState());
7034     AAMemoryBehavior::initialize(A);
7035   }
7036 
7037   /// Return the memory behavior information encoded in the IR for \p IRP.
7038   static void getKnownStateFromValue(const IRPosition &IRP,
7039                                      BitIntegerState &State,
7040                                      bool IgnoreSubsumingPositions = false) {
7041     SmallVector<Attribute, 2> Attrs;
7042     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7043     for (const Attribute &Attr : Attrs) {
7044       switch (Attr.getKindAsEnum()) {
7045       case Attribute::ReadNone:
7046         State.addKnownBits(NO_ACCESSES);
7047         break;
7048       case Attribute::ReadOnly:
7049         State.addKnownBits(NO_WRITES);
7050         break;
7051       case Attribute::WriteOnly:
7052         State.addKnownBits(NO_READS);
7053         break;
7054       default:
7055         llvm_unreachable("Unexpected attribute!");
7056       }
7057     }
7058 
7059     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
7060       if (!I->mayReadFromMemory())
7061         State.addKnownBits(NO_READS);
7062       if (!I->mayWriteToMemory())
7063         State.addKnownBits(NO_WRITES);
7064     }
7065   }
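  // For example (sketch): an existing `readonly` attribute contributes the
  // known bits NO_WRITES, while an anchor instruction that cannot write
  // memory, per mayWriteToMemory(), contributes the same bits; known bits
  // only ever grow, and the fixpoint iteration refines the assumed bits.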
7066 
7067   /// See AbstractAttribute::getDeducedAttributes(...).
7068   void getDeducedAttributes(LLVMContext &Ctx,
7069                             SmallVectorImpl<Attribute> &Attrs) const override {
7070     assert(Attrs.size() == 0);
7071     if (isAssumedReadNone())
7072       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7073     else if (isAssumedReadOnly())
7074       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
7075     else if (isAssumedWriteOnly())
7076       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
7077     assert(Attrs.size() <= 1);
7078   }
7079 
7080   /// See AbstractAttribute::manifest(...).
7081   ChangeStatus manifest(Attributor &A) override {
7082     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
7083       return ChangeStatus::UNCHANGED;
7084 
7085     const IRPosition &IRP = getIRPosition();
7086 
7087     // Check if we would improve the existing attributes first.
7088     SmallVector<Attribute, 4> DeducedAttrs;
7089     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7090     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7091           return IRP.hasAttr(Attr.getKindAsEnum(),
7092                              /* IgnoreSubsumingPositions */ true);
7093         }))
7094       return ChangeStatus::UNCHANGED;
7095 
7096     // Clear existing attributes.
7097     IRP.removeAttrs(AttrKinds);
7098 
7099     // Use the generic manifest method.
7100     return IRAttribute::manifest(A);
7101   }
7102 
7103   /// See AbstractState::getAsStr().
7104   const std::string getAsStr() const override {
7105     if (isAssumedReadNone())
7106       return "readnone";
7107     if (isAssumedReadOnly())
7108       return "readonly";
7109     if (isAssumedWriteOnly())
7110       return "writeonly";
7111     return "may-read/write";
7112   }
7113 
7114   /// The set of IR attributes AAMemoryBehavior deals with.
7115   static const Attribute::AttrKind AttrKinds[3];
7116 };
7117 
7118 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7119     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
7120 
7121 /// Memory behavior attribute for a floating value.
7122 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7123   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7124       : AAMemoryBehaviorImpl(IRP, A) {}
7125 
7126   /// See AbstractAttribute::updateImpl(...).
7127   ChangeStatus updateImpl(Attributor &A) override;
7128 
7129   /// See AbstractAttribute::trackStatistics()
7130   void trackStatistics() const override {
7131     if (isAssumedReadNone())
7132       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7133     else if (isAssumedReadOnly())
7134       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7135     else if (isAssumedWriteOnly())
7136       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7137   }
7138 
7139 private:
7140   /// Return true if users of \p UserI might access the underlying
7141   /// variable/location described by \p U and should therefore be analyzed.
7142   bool followUsersOfUseIn(Attributor &A, const Use &U,
7143                           const Instruction *UserI);
7144 
7145   /// Update the state according to the effect of use \p U in \p UserI.
7146   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7147 };
7148 
7149 /// Memory behavior attribute for function argument.
7150 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7151   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7152       : AAMemoryBehaviorFloating(IRP, A) {}
7153 
7154   /// See AbstractAttribute::initialize(...).
7155   void initialize(Attributor &A) override {
7156     intersectAssumedBits(BEST_STATE);
7157     const IRPosition &IRP = getIRPosition();
7158     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7159     // can query it when we use has/getAttr. That would allow us to reuse the
7160     // initialize of the base class here.
7161     bool HasByVal =
7162         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7163     getKnownStateFromValue(IRP, getState(),
7164                            /* IgnoreSubsumingPositions */ HasByVal);
7165 
7166     // Initialize the use vector with all direct uses of the associated value.
7167     Argument *Arg = getAssociatedArgument();
7168     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7169       indicatePessimisticFixpoint();
7170   }
7171 
7172   ChangeStatus manifest(Attributor &A) override {
7173     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7174     if (!getAssociatedValue().getType()->isPointerTy())
7175       return ChangeStatus::UNCHANGED;
7176 
7177     // TODO: From readattrs.ll: "inalloca parameters are always
7178     //                           considered written"
7179     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7180       removeKnownBits(NO_WRITES);
7181       removeAssumedBits(NO_WRITES);
7182     }
7183     return AAMemoryBehaviorFloating::manifest(A);
7184   }
7185 
7186   /// See AbstractAttribute::trackStatistics()
7187   void trackStatistics() const override {
7188     if (isAssumedReadNone())
7189       STATS_DECLTRACK_ARG_ATTR(readnone)
7190     else if (isAssumedReadOnly())
7191       STATS_DECLTRACK_ARG_ATTR(readonly)
7192     else if (isAssumedWriteOnly())
7193       STATS_DECLTRACK_ARG_ATTR(writeonly)
7194   }
7195 };
7196 
7197 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7198   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7199       : AAMemoryBehaviorArgument(IRP, A) {}
7200 
7201   /// See AbstractAttribute::initialize(...).
7202   void initialize(Attributor &A) override {
7203     // If we don't have an associated argument this is either a variadic call
7204     // or an indirect call; either way, there is nothing we can do here.
7205     Argument *Arg = getAssociatedArgument();
7206     if (!Arg) {
7207       indicatePessimisticFixpoint();
7208       return;
7209     }
7210     if (Arg->hasByValAttr()) {
7211       addKnownBits(NO_WRITES);
7212       removeKnownBits(NO_READS);
7213       removeAssumedBits(NO_READS);
7214     }
7215     AAMemoryBehaviorArgument::initialize(A);
7216     if (getAssociatedFunction()->isDeclaration())
7217       indicatePessimisticFixpoint();
7218   }
7219 
7220   /// See AbstractAttribute::updateImpl(...).
7221   ChangeStatus updateImpl(Attributor &A) override {
7222     // TODO: Once we have call site specific value information we can provide
7223     //       call site specific liveness information and then it makes
7224     //       sense to specialize attributes for call site arguments instead of
7225     //       redirecting requests to the callee argument.
7226     Argument *Arg = getAssociatedArgument();
7227     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7228     auto &ArgAA =
7229         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7230     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7231   }
7232 
7233   /// See AbstractAttribute::trackStatistics()
7234   void trackStatistics() const override {
7235     if (isAssumedReadNone())
7236       STATS_DECLTRACK_CSARG_ATTR(readnone)
7237     else if (isAssumedReadOnly())
7238       STATS_DECLTRACK_CSARG_ATTR(readonly)
7239     else if (isAssumedWriteOnly())
7240       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7241   }
7242 };
7243 
7244 /// Memory behavior attribute for a call site return position.
7245 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7246   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7247       : AAMemoryBehaviorFloating(IRP, A) {}
7248 
7249   /// See AbstractAttribute::initialize(...).
7250   void initialize(Attributor &A) override {
7251     AAMemoryBehaviorImpl::initialize(A);
7252     Function *F = getAssociatedFunction();
7253     if (!F || F->isDeclaration())
7254       indicatePessimisticFixpoint();
7255   }
7256 
7257   /// See AbstractAttribute::manifest(...).
7258   ChangeStatus manifest(Attributor &A) override {
7259     // We do not annotate returned values.
7260     return ChangeStatus::UNCHANGED;
7261   }
7262 
7263   /// See AbstractAttribute::trackStatistics()
7264   void trackStatistics() const override {}
7265 };
7266 
7267 /// An AA to represent the memory behavior function attributes.
7268 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7269   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7270       : AAMemoryBehaviorImpl(IRP, A) {}
7271 
7272   /// See AbstractAttribute::updateImpl(Attributor &A).
7273   virtual ChangeStatus updateImpl(Attributor &A) override;
7274 
7275   /// See AbstractAttribute::manifest(...).
7276   ChangeStatus manifest(Attributor &A) override {
7277     Function &F = cast<Function>(getAnchorValue());
7278     if (isAssumedReadNone()) {
7279       F.removeFnAttr(Attribute::ArgMemOnly);
7280       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7281       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7282     }
7283     return AAMemoryBehaviorImpl::manifest(A);
7284   }
7285 
7286   /// See AbstractAttribute::trackStatistics()
7287   void trackStatistics() const override {
7288     if (isAssumedReadNone())
7289       STATS_DECLTRACK_FN_ATTR(readnone)
7290     else if (isAssumedReadOnly())
7291       STATS_DECLTRACK_FN_ATTR(readonly)
7292     else if (isAssumedWriteOnly())
7293       STATS_DECLTRACK_FN_ATTR(writeonly)
7294   }
7295 };
7296 
7297 /// AAMemoryBehavior attribute for call sites.
7298 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7299   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7300       : AAMemoryBehaviorImpl(IRP, A) {}
7301 
7302   /// See AbstractAttribute::initialize(...).
7303   void initialize(Attributor &A) override {
7304     AAMemoryBehaviorImpl::initialize(A);
7305     Function *F = getAssociatedFunction();
7306     if (!F || F->isDeclaration())
7307       indicatePessimisticFixpoint();
7308   }
7309 
7310   /// See AbstractAttribute::updateImpl(...).
7311   ChangeStatus updateImpl(Attributor &A) override {
7312     // TODO: Once we have call site specific value information we can provide
7313     //       call site specific liveness information and then it makes
7314     //       sense to specialize attributes for call site arguments instead of
7315     //       redirecting requests to the callee argument.
7316     Function *F = getAssociatedFunction();
7317     const IRPosition &FnPos = IRPosition::function(*F);
7318     auto &FnAA =
7319         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7320     return clampStateAndIndicateChange(getState(), FnAA.getState());
7321   }
7322 
7323   /// See AbstractAttribute::trackStatistics()
7324   void trackStatistics() const override {
7325     if (isAssumedReadNone())
7326       STATS_DECLTRACK_CS_ATTR(readnone)
7327     else if (isAssumedReadOnly())
7328       STATS_DECLTRACK_CS_ATTR(readonly)
7329     else if (isAssumedWriteOnly())
7330       STATS_DECLTRACK_CS_ATTR(writeonly)
7331   }
7332 };
7333 
7334 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7335 
7336   // The current assumed state used to determine a change.
7337   auto AssumedState = getAssumed();
7338 
7339   auto CheckRWInst = [&](Instruction &I) {
7340     // If the instruction has its own memory behavior state, use it to restrict
7341     // the local state. No further analysis is required as the other memory
7342     // state is as optimistic as it gets.
7343     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7344       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7345           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7346       intersectAssumedBits(MemBehaviorAA.getAssumed());
7347       return !isAtFixpoint();
7348     }
7349 
7350     // Remove access kind modifiers if necessary.
7351     if (I.mayReadFromMemory())
7352       removeAssumedBits(NO_READS);
7353     if (I.mayWriteToMemory())
7354       removeAssumedBits(NO_WRITES);
7355     return !isAtFixpoint();
7356   };
7357 
7358   bool UsedAssumedInformation = false;
7359   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7360                                           UsedAssumedInformation))
7361     return indicatePessimisticFixpoint();
7362 
7363   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7364                                         : ChangeStatus::UNCHANGED;
7365 }
7366 
7367 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7368 
7369   const IRPosition &IRP = getIRPosition();
7370   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7371   AAMemoryBehavior::StateType &S = getState();
7372 
7373   // First, check the function scope. We take the known information and we avoid
7374   // work if the assumed information implies the current assumed information for
7375   // this attribute. This is valid for all but byval arguments.
7376   Argument *Arg = IRP.getAssociatedArgument();
7377   AAMemoryBehavior::base_t FnMemAssumedState =
7378       AAMemoryBehavior::StateType::getWorstState();
7379   if (!Arg || !Arg->hasByValAttr()) {
7380     const auto &FnMemAA =
7381         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7382     FnMemAssumedState = FnMemAA.getAssumed();
7383     S.addKnownBits(FnMemAA.getKnown());
7384     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7385       return ChangeStatus::UNCHANGED;
7386   }
7387 
7388   // The current assumed state used to determine a change.
7389   auto AssumedState = S.getAssumed();
7390 
7391   // Make sure the value is not captured (except through "return"); if
7392   // it is, any information derived would be irrelevant anyway as we cannot
7393   // check the potential aliases introduced by the capture. However, no need
7394   // to fall back to anything less optimistic than the function state.
7395   const auto &ArgNoCaptureAA =
7396       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7397   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7398     S.intersectAssumedBits(FnMemAssumedState);
7399     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7400                                           : ChangeStatus::UNCHANGED;
7401   }
7402 
7403   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7404   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7405     Instruction *UserI = cast<Instruction>(U.getUser());
7406     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7407                       << " \n");
7408 
7409     // Droppable users, e.g., llvm.assume, do not actually perform any action.
7410     if (UserI->isDroppable())
7411       return true;
7412 
7413     // Check if the users of UserI should also be visited.
7414     Follow = followUsersOfUseIn(A, U, UserI);
7415 
7416     // If UserI might touch memory we analyze the use in detail.
7417     if (UserI->mayReadOrWriteMemory())
7418       analyzeUseIn(A, U, UserI);
7419 
7420     return !isAtFixpoint();
7421   };
7422 
7423   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7424     return indicatePessimisticFixpoint();
7425 
7426   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7427                                         : ChangeStatus::UNCHANGED;
7428 }
7429 
7430 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7431                                                   const Instruction *UserI) {
7432   // The loaded value is unrelated to the pointer argument; no need to
7433   // follow the users of the load.
7434   if (isa<LoadInst>(UserI))
7435     return false;
7436 
7437   // By default we follow all uses assuming UserI might leak information on U;
7438   // we have special handling for call site operands though.
7439   const auto *CB = dyn_cast<CallBase>(UserI);
7440   if (!CB || !CB->isArgOperand(&U))
7441     return true;
7442 
7443   // If the use is a call argument known not to be captured, the users of
7444   // the call do not need to be visited because they have to be unrelated to
7445   // the input. Note that this check is not trivial even though we disallow
7446   // general capturing of the underlying argument. The reason is that the
7447   // call might capture the argument "through return", which we allow and
7448   // for which we need to check call users.
7449   if (U.get()->getType()->isPointerTy()) {
7450     unsigned ArgNo = CB->getArgOperandNo(&U);
7451     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7452         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7453     return !ArgNoCaptureAA.isAssumedNoCapture();
7454   }
7455 
7456   return true;
7457 }
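// Illustrative examples for the rules above: for `%v = load i32, i32* %p` the
// users of %v are unrelated to %p and are not followed, and for a call
// `call void @f(i32* nocapture %p)` the call's users need not be visited
// either, since the argument is assumed not to escape through the call.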
7458 
7459 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7460                                             const Instruction *UserI) {
7461   assert(UserI->mayReadOrWriteMemory());
7462 
7463   switch (UserI->getOpcode()) {
7464   default:
7465     // TODO: Handle all atomics and other side-effect operations we know of.
7466     break;
7467   case Instruction::Load:
7468     // Loads cause the NO_READS property to disappear.
7469     removeAssumedBits(NO_READS);
7470     return;
7471 
7472   case Instruction::Store:
7473     // Stores cause the NO_WRITES property to disappear if the use is the
7474     // pointer operand. Note that while capturing was taken care of somewhere
7475     // else we need to deal with stores of the value that is not looked through.
7476     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7477       removeAssumedBits(NO_WRITES);
7478     else
7479       indicatePessimisticFixpoint();
7480     return;
7481 
7482   case Instruction::Call:
7483   case Instruction::CallBr:
7484   case Instruction::Invoke: {
7485     // For call sites we look at the argument memory behavior attribute (this
7486     // could be recursive!) in order to restrict our own state.
7487     const auto *CB = cast<CallBase>(UserI);
7488 
7489     // Give up on operand bundles.
7490     if (CB->isBundleOperand(&U)) {
7491       indicatePessimisticFixpoint();
7492       return;
7493     }
7494 
7495     // Calling a function does read the function pointer, and may write it if
7496     // the function is self-modifying.
7497     if (CB->isCallee(&U)) {
7498       removeAssumedBits(NO_READS);
7499       break;
7500     }
7501 
7502     // Adjust the possible access behavior based on the information on the
7503     // argument.
7504     IRPosition Pos;
7505     if (U.get()->getType()->isPointerTy())
7506       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7507     else
7508       Pos = IRPosition::callsite_function(*CB);
7509     const auto &MemBehaviorAA =
7510         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7511     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7512     // and at least "known".
7513     intersectAssumedBits(MemBehaviorAA.getAssumed());
7514     return;
7515   }
7516   };
7517 
7518   // Generally, look at the "may-properties" and adjust the assumed state if we
7519   // did not trigger special handling before.
7520   if (UserI->mayReadFromMemory())
7521     removeAssumedBits(NO_READS);
7522   if (UserI->mayWriteToMemory())
7523     removeAssumedBits(NO_WRITES);
7524 }
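// For example (sketch): `store i32 0, i32* %p` with %p as the pointer operand
// only clears NO_WRITES, whereas `store i32* %p, i32** %q` stores the tracked
// pointer itself and forces a pessimistic fixpoint, as the value escapes
// through memory.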
7525 
7526 /// -------------------- Memory Locations Attributes ---------------------------
7527 /// Includes read-none, argmemonly, inaccessiblememonly, and
7528 /// inaccessiblemem_or_argmemonly.
7529 /// ----------------------------------------------------------------------------
7530 
7531 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7532     AAMemoryLocation::MemoryLocationsKind MLK) {
7533   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7534     return "all memory";
7535   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7536     return "no memory";
7537   std::string S = "memory:";
7538   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7539     S += "stack,";
7540   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7541     S += "constant,";
7542   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7543     S += "internal global,";
7544   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7545     S += "external global,";
7546   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7547     S += "argument,";
7548   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7549     S += "inaccessible,";
7550   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7551     S += "malloced,";
7552   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7553     S += "unknown,";
7554   S.pop_back();
7555   return S;
7556 }
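// Example output (illustrative): a function that only touches an alloca and a
// pointer argument, i.e., all "no access" bits are set except NO_LOCAL_MEM and
// NO_ARGUMENT_MEM, is printed as "memory:stack,argument".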
7557 
7558 struct AAMemoryLocationImpl : public AAMemoryLocation {
7559 
7560   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7561       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7562     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7563       AccessKind2Accesses[u] = nullptr;
7564   }
7565 
7566   ~AAMemoryLocationImpl() {
7567     // The AccessSets are allocated via a BumpPtrAllocator, we call
7568     // the destructor manually.
7569     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7570       if (AccessKind2Accesses[u])
7571         AccessKind2Accesses[u]->~AccessSet();
7572   }
7573 
7574   /// See AbstractAttribute::initialize(...).
7575   void initialize(Attributor &A) override {
7576     intersectAssumedBits(BEST_STATE);
7577     getKnownStateFromValue(A, getIRPosition(), getState());
7578     AAMemoryLocation::initialize(A);
7579   }
7580 
7581   /// Return the memory behavior information encoded in the IR for \p IRP.
7582   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7583                                      BitIntegerState &State,
7584                                      bool IgnoreSubsumingPositions = false) {
7585     // For internal functions we ignore `argmemonly` and
7586     // `inaccessiblemem_or_argmemonly` as we might break them via
7587     // interprocedural constant propagation. It is unclear if this is the
7588     // best way, but it is unlikely this will cause real performance problems.
7589     // If we are deriving attributes for the anchor function we even remove
7590     // the attribute in addition to ignoring it.
7591     bool UseArgMemOnly = true;
7592     Function *AnchorFn = IRP.getAnchorScope();
7593     if (AnchorFn && A.isRunOn(*AnchorFn))
7594       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7595 
7596     SmallVector<Attribute, 2> Attrs;
7597     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7598     for (const Attribute &Attr : Attrs) {
7599       switch (Attr.getKindAsEnum()) {
7600       case Attribute::ReadNone:
7601         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7602         break;
7603       case Attribute::InaccessibleMemOnly:
7604         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7605         break;
7606       case Attribute::ArgMemOnly:
7607         if (UseArgMemOnly)
7608           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7609         else
7610           IRP.removeAttrs({Attribute::ArgMemOnly});
7611         break;
7612       case Attribute::InaccessibleMemOrArgMemOnly:
7613         if (UseArgMemOnly)
7614           State.addKnownBits(inverseLocation(
7615               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7616         else
7617           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7618         break;
7619       default:
7620         llvm_unreachable("Unexpected attribute!");
7621       }
7622     }
7623   }
7624 
7625   /// See AbstractAttribute::getDeducedAttributes(...).
7626   void getDeducedAttributes(LLVMContext &Ctx,
7627                             SmallVectorImpl<Attribute> &Attrs) const override {
7628     assert(Attrs.size() == 0);
7629     if (isAssumedReadNone()) {
7630       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7631     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7632       if (isAssumedInaccessibleMemOnly())
7633         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7634       else if (isAssumedArgMemOnly())
7635         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7636       else if (isAssumedInaccessibleOrArgMemOnly())
7637         Attrs.push_back(
7638             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7639     }
7640     assert(Attrs.size() <= 1);
7641   }
7642 
7643   /// See AbstractAttribute::manifest(...).
7644   ChangeStatus manifest(Attributor &A) override {
7645     const IRPosition &IRP = getIRPosition();
7646 
7647     // Check if we would improve the existing attributes first.
7648     SmallVector<Attribute, 4> DeducedAttrs;
7649     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7650     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7651           return IRP.hasAttr(Attr.getKindAsEnum(),
7652                              /* IgnoreSubsumingPositions */ true);
7653         }))
7654       return ChangeStatus::UNCHANGED;
7655 
7656     // Clear existing attributes.
7657     IRP.removeAttrs(AttrKinds);
7658     if (isAssumedReadNone())
7659       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7660 
7661     // Use the generic manifest method.
7662     return IRAttribute::manifest(A);
7663   }
7664 
7665   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7666   bool checkForAllAccessesToMemoryKind(
7667       function_ref<bool(const Instruction *, const Value *, AccessKind,
7668                         MemoryLocationsKind)>
7669           Pred,
7670       MemoryLocationsKind RequestedMLK) const override {
7671     if (!isValidState())
7672       return false;
7673 
7674     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7675     if (AssumedMLK == NO_LOCATIONS)
7676       return true;
7677 
7678     unsigned Idx = 0;
7679     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7680          CurMLK *= 2, ++Idx) {
7681       if (CurMLK & RequestedMLK)
7682         continue;
7683 
7684       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7685         for (const AccessInfo &AI : *Accesses)
7686           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7687             return false;
7688     }
7689 
7690     return true;
7691   }
7692 
7693   ChangeStatus indicatePessimisticFixpoint() override {
7694     // If we give up and indicate a pessimistic fixpoint this instruction will
7695     // become an access for all potential access kinds:
7696     // TODO: Add pointers for argmemonly and globals to improve the results of
7697     //       checkForAllAccessesToMemoryKind.
7698     bool Changed = false;
7699     MemoryLocationsKind KnownMLK = getKnown();
7700     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7701     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7702       if (!(CurMLK & KnownMLK))
7703         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7704                                   getAccessKindFromInst(I));
7705     return AAMemoryLocation::indicatePessimisticFixpoint();
7706   }
7707 
7708 protected:
7709   /// Helper struct to tie together an instruction that has a read or write
7710   /// effect with the pointer it accesses (if any).
7711   struct AccessInfo {
7712 
7713     /// The instruction that caused the access.
7714     const Instruction *I;
7715 
7716     /// The base pointer that is accessed, or null if unknown.
7717     const Value *Ptr;
7718 
7719     /// The kind of access (read/write/read+write).
7720     AccessKind Kind;
7721 
7722     bool operator==(const AccessInfo &RHS) const {
7723       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7724     }
7725     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7726       if (LHS.I != RHS.I)
7727         return LHS.I < RHS.I;
7728       if (LHS.Ptr != RHS.Ptr)
7729         return LHS.Ptr < RHS.Ptr;
7730       if (LHS.Kind != RHS.Kind)
7731         return LHS.Kind < RHS.Kind;
7732       return false;
7733     }
7734   };
7735 
7736   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7737   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7738   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7739   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7740 
7741   /// Categorize the pointer arguments of \p CB that might access memory in
7742   /// \p AccessedLocs and update the state and access map accordingly.
7743   void
7744   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7745                                      AAMemoryLocation::StateType &AccessedLocs,
7746                                      bool &Changed);
7747 
7748   /// Return the kind(s) of location that may be accessed by \p I.
7749   AAMemoryLocation::MemoryLocationsKind
7750   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7751 
7752   /// Return the access kind as determined by \p I.
7753   AccessKind getAccessKindFromInst(const Instruction *I) {
7754     AccessKind AK = READ_WRITE;
7755     if (I) {
7756       AK = I->mayReadFromMemory() ? READ : NONE;
7757       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7758     }
7759     return AK;
7760   }
7761 
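  // For example (sketch): a load yields READ, a plain store yields WRITE, an
  // atomicrmw yields READ_WRITE, and a null instruction conservatively yields
  // READ_WRITE.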
7762   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7763   /// an access of kind \p AK to a \p MLK memory location with the access
7764   /// pointer \p Ptr.
7765   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7766                                  MemoryLocationsKind MLK, const Instruction *I,
7767                                  const Value *Ptr, bool &Changed,
7768                                  AccessKind AK = READ_WRITE) {
7769 
7770     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7771     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7772     if (!Accesses)
7773       Accesses = new (Allocator) AccessSet();
7774     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7775     State.removeAssumedBits(MLK);
7776   }
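  // Usage sketch (illustrative): a store through an internal global GV in
  // instruction I would be recorded as
  //   updateStateAndAccessesMap(State, NO_GLOBAL_INTERNAL_MEM, &I, GV,
  //                             Changed, WRITE);
  // which remembers the access and clears the assumed NO_GLOBAL_INTERNAL_MEM
  // bit in one step.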
7777 
7778   /// Determine the underlying location kinds for \p Ptr, e.g., globals or
7779   /// arguments, and update the state and access map accordingly.
7780   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7781                           AAMemoryLocation::StateType &State, bool &Changed);
7782 
7783   /// Used to allocate access sets.
7784   BumpPtrAllocator &Allocator;
7785 
7786   /// The set of IR attributes AAMemoryLocation deals with.
7787   static const Attribute::AttrKind AttrKinds[4];
7788 };
7789 
7790 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7791     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7792     Attribute::InaccessibleMemOrArgMemOnly};
7793 
7794 void AAMemoryLocationImpl::categorizePtrValue(
7795     Attributor &A, const Instruction &I, const Value &Ptr,
7796     AAMemoryLocation::StateType &State, bool &Changed) {
7797   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7798                     << Ptr << " ["
7799                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7800 
7801   SmallVector<Value *, 8> Objects;
7802   bool UsedAssumedInformation = false;
7803   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
7804                                        UsedAssumedInformation,
7805                                        /* Intraprocedural */ true)) {
7806     LLVM_DEBUG(
7807         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7808     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7809                               getAccessKindFromInst(&I));
7810     return;
7811   }
7812 
7813   for (Value *Obj : Objects) {
7814     // TODO: recognize the TBAA used for constant accesses.
7815     MemoryLocationsKind MLK = NO_LOCATIONS;
7816     if (isa<UndefValue>(Obj))
7817       continue;
7818     if (isa<Argument>(Obj)) {
7819       // TODO: For now we do not treat byval arguments as local copies performed
7820       // on the call edge, though, we should. To make that happen we need to
7821       // teach various passes, e.g., DSE, about the copy effect of a byval. That
7822       // would also allow us to mark functions only accessing byval arguments as
7823       // readnone again, as arguably their accesses have no effect outside of
7824       // the function, like accesses to allocas.
7825       MLK = NO_ARGUMENT_MEM;
7826     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7827       // Reading constant memory is not treated as a read "effect" by the
7828       // function attr pass, so we won't either. Constants defined by TBAA are
7829       // similar. (We know we do not write it because it is constant.)
7830       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7831         if (GVar->isConstant())
7832           continue;
7833 
7834       if (GV->hasLocalLinkage())
7835         MLK = NO_GLOBAL_INTERNAL_MEM;
7836       else
7837         MLK = NO_GLOBAL_EXTERNAL_MEM;
7838     } else if (isa<ConstantPointerNull>(Obj) &&
7839                !NullPointerIsDefined(getAssociatedFunction(),
7840                                      Ptr.getType()->getPointerAddressSpace())) {
7841       continue;
7842     } else if (isa<AllocaInst>(Obj)) {
7843       MLK = NO_LOCAL_MEM;
7844     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7845       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7846           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7847       if (NoAliasAA.isAssumedNoAlias())
7848         MLK = NO_MALLOCED_MEM;
7849       else
7850         MLK = NO_UNKOWN_MEM;
7851     } else {
7852       MLK = NO_UNKOWN_MEM;
7853     }
7854 
7855     assert(MLK != NO_LOCATIONS && "No location specified!");
7856     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7857                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7858                       << "\n");
7859     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7860                               getAccessKindFromInst(&I));
7861   }
7862 
7863   LLVM_DEBUG(
7864       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7865              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7866 }
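// Summary of the mapping above (illustrative): alloca -> stack, argument ->
// argument memory, internal/external global -> the respective global kind,
// noalias call result -> malloced memory; constant globals and provably null
// pointers are skipped, and anything else falls back to unknown memory.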
7867 
7868 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7869     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7870     bool &Changed) {
7871   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7872 
7873     // Skip non-pointer arguments.
7874     const Value *ArgOp = CB.getArgOperand(ArgNo);
7875     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7876       continue;
7877 
7878     // Skip readnone arguments.
7879     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7880     const auto &ArgOpMemLocationAA =
7881         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7882 
7883     if (ArgOpMemLocationAA.isAssumedReadNone())
7884       continue;
7885 
7886     // Categorize potentially accessed pointer arguments as if there was an
7887     // access instruction with them as pointer.
7888     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7889   }
7890 }
7891 
7892 AAMemoryLocation::MemoryLocationsKind
7893 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7894                                                   bool &Changed) {
7895   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7896                     << I << "\n");
7897 
7898   AAMemoryLocation::StateType AccessedLocs;
7899   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7900 
7901   if (auto *CB = dyn_cast<CallBase>(&I)) {
7902 
7903     // First check if we assume any memory access is visible.
7904     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7905         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7906     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7907                       << " [" << CBMemLocationAA << "]\n");
7908 
7909     if (CBMemLocationAA.isAssumedReadNone())
7910       return NO_LOCATIONS;
7911 
7912     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7913       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7914                                 Changed, getAccessKindFromInst(&I));
7915       return AccessedLocs.getAssumed();
7916     }
7917 
7918     uint32_t CBAssumedNotAccessedLocs =
7919         CBMemLocationAA.getAssumedNotAccessedLocation();
7920 
7921     // Set the argmemonly and global bits as we handle them separately below.
7922     uint32_t CBAssumedNotAccessedLocsNoArgMem =
7923         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7924 
7925     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7926       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7927         continue;
7928       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7929                                 getAccessKindFromInst(&I));
7930     }
7931 
7932     // Now handle global memory if it might be accessed. This is slightly tricky
7933     // as NO_GLOBAL_MEM has multiple bits set.
7934     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7935     if (HasGlobalAccesses) {
7936       auto AccessPred = [&](const Instruction *, const Value *Ptr,
7937                             AccessKind Kind, MemoryLocationsKind MLK) {
7938         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7939                                   getAccessKindFromInst(&I));
7940         return true;
7941       };
7942       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7943               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7944         return AccessedLocs.getWorstState();
7945     }
7946 
7947     LLVM_DEBUG(
7948         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7949                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7950 
7951     // Now handle argument memory if it might be accessed.
7952     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7953     if (HasArgAccesses)
7954       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7955 
7956     LLVM_DEBUG(
7957         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7958                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7959 
7960     return AccessedLocs.getAssumed();
7961   }
7962 
7963   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7964     LLVM_DEBUG(
7965         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7966                << I << " [" << *Ptr << "]\n");
7967     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7968     return AccessedLocs.getAssumed();
7969   }
7970 
7971   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7972                     << I << "\n");
7973   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7974                             getAccessKindFromInst(&I));
7975   return AccessedLocs.getAssumed();
7976 }
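
// As a worked example (names are illustrative): for
//
//   @G = internal global i32 0
//   %v = load i32, i32* @G
//
// the load is not a call base, so its pointer operand @G is handed to
// categorizePtrValue, which should record NO_GLOBAL_INTERNAL_MEM as the
// accessed location kind for this instruction.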
7977 
7978 /// An AA to represent the memory behavior function attributes.
7979 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7980   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7981       : AAMemoryLocationImpl(IRP, A) {}
7982 
7983   /// See AbstractAttribute::updateImpl(Attributor &A).
7984   ChangeStatus updateImpl(Attributor &A) override {
7985 
7986     const auto &MemBehaviorAA =
7987         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7988     if (MemBehaviorAA.isAssumedReadNone()) {
7989       if (MemBehaviorAA.isKnownReadNone())
7990         return indicateOptimisticFixpoint();
7991       assert(isAssumedReadNone() &&
7992              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7993       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7994       return ChangeStatus::UNCHANGED;
7995     }
7996 
7997     // The current assumed state used to determine a change.
7998     auto AssumedState = getAssumed();
7999     bool Changed = false;
8000 
8001     auto CheckRWInst = [&](Instruction &I) {
8002       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
8003       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
8004                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
8005       removeAssumedBits(inverseLocation(MLK, false, false));
8006       // Stop once only the valid bit is set in the *not assumed location*
8007       // state, that is, once we no longer exclude any memory locations.
8008       return getAssumedNotAccessedLocation() != VALID_STATE;
8009     };
8010 
8011     bool UsedAssumedInformation = false;
8012     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8013                                             UsedAssumedInformation))
8014       return indicatePessimisticFixpoint();
8015 
8016     Changed |= AssumedState != getAssumed();
8017     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8018   }
8019 
8020   /// See AbstractAttribute::trackStatistics()
8021   void trackStatistics() const override {
8022     if (isAssumedReadNone())
8023       STATS_DECLTRACK_FN_ATTR(readnone)
8024     else if (isAssumedArgMemOnly())
8025       STATS_DECLTRACK_FN_ATTR(argmemonly)
8026     else if (isAssumedInaccessibleMemOnly())
8027       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8028     else if (isAssumedInaccessibleOrArgMemOnly())
8029       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8030   }
8031 };
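
// Sketch of the intended outcome: if CheckRWInst above only ever categorizes
// accesses through pointer arguments, the bits that remain assumed correspond
// to argument memory, and trackStatistics() reports argmemonly.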
8032 
8033 /// AAMemoryLocation attribute for call sites.
8034 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8035   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8036       : AAMemoryLocationImpl(IRP, A) {}
8037 
8038   /// See AbstractAttribute::initialize(...).
8039   void initialize(Attributor &A) override {
8040     AAMemoryLocationImpl::initialize(A);
8041     Function *F = getAssociatedFunction();
8042     if (!F || F->isDeclaration())
8043       indicatePessimisticFixpoint();
8044   }
8045 
8046   /// See AbstractAttribute::updateImpl(...).
8047   ChangeStatus updateImpl(Attributor &A) override {
8048     // TODO: Once we have call site specific value information we can provide
8049     //       call site specific liveness information and then it makes
8050     //       sense to specialize attributes for call site arguments instead of
8051     //       redirecting requests to the callee argument.
8052     Function *F = getAssociatedFunction();
8053     const IRPosition &FnPos = IRPosition::function(*F);
8054     auto &FnAA =
8055         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8056     bool Changed = false;
8057     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8058                           AccessKind Kind, MemoryLocationsKind MLK) {
8059       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8060                                 getAccessKindFromInst(I));
8061       return true;
8062     };
8063     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8064       return indicatePessimisticFixpoint();
8065     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8066   }
8067 
8068   /// See AbstractAttribute::trackStatistics()
8069   void trackStatistics() const override {
8070     if (isAssumedReadNone())
8071       STATS_DECLTRACK_CS_ATTR(readnone)
8072   }
8073 };
8074 
8075 /// ------------------ Value Constant Range Attribute -------------------------
8076 
8077 struct AAValueConstantRangeImpl : AAValueConstantRange {
8078   using StateType = IntegerRangeState;
8079   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8080       : AAValueConstantRange(IRP, A) {}
8081 
8082   /// See AbstractAttribute::initialize(..).
8083   void initialize(Attributor &A) override {
8084     if (A.hasSimplificationCallback(getIRPosition())) {
8085       indicatePessimisticFixpoint();
8086       return;
8087     }
8088 
8089     // Intersect a range given by SCEV.
8090     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8091 
8092     // Intersect a range given by LVI.
8093     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8094   }
8095 
8096   /// See AbstractAttribute::getAsStr().
8097   const std::string getAsStr() const override {
8098     std::string Str;
8099     llvm::raw_string_ostream OS(Str);
8100     OS << "range(" << getBitWidth() << ")<";
8101     getKnown().print(OS);
8102     OS << " / ";
8103     getAssumed().print(OS);
8104     OS << ">";
8105     return OS.str();
8106   }
8107 
8108   /// Helper function to get a SCEV expr for the associated value at program
8109   /// point \p I.
8110   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8111     if (!getAnchorScope())
8112       return nullptr;
8113 
8114     ScalarEvolution *SE =
8115         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8116             *getAnchorScope());
8117 
8118     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8119         *getAnchorScope());
8120 
8121     if (!SE || !LI)
8122       return nullptr;
8123 
8124     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8125     if (!I)
8126       return S;
8127 
8128     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8129   }
8130 
8131   /// Helper function to get a range from SCEV for the associated value at
8132   /// program point \p I.
8133   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8134                                          const Instruction *I = nullptr) const {
8135     if (!getAnchorScope())
8136       return getWorstState(getBitWidth());
8137 
8138     ScalarEvolution *SE =
8139         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8140             *getAnchorScope());
8141 
8142     const SCEV *S = getSCEV(A, I);
8143     if (!SE || !S)
8144       return getWorstState(getBitWidth());
8145 
8146     return SE->getUnsignedRange(S);
8147   }
8148 
8149   /// Helper function to get a range from LVI for the associated value at
8150   /// program point \p I.
8151   ConstantRange
8152   getConstantRangeFromLVI(Attributor &A,
8153                           const Instruction *CtxI = nullptr) const {
8154     if (!getAnchorScope())
8155       return getWorstState(getBitWidth());
8156 
8157     LazyValueInfo *LVI =
8158         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8159             *getAnchorScope());
8160 
8161     if (!LVI || !CtxI)
8162       return getWorstState(getBitWidth());
8163     return LVI->getConstantRange(&getAssociatedValue(),
8164                                  const_cast<Instruction *>(CtxI));
8165   }
8166 
8167   /// Return true if \p CtxI is valid for querying outside analyses.
8168   /// This basically makes sure we do not ask intra-procedural analysis
8169   /// about a context in the wrong function or a context that violates
8170   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8171   /// if the original context of this AA is OK or should be considered invalid.
8172   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8173                                                const Instruction *CtxI,
8174                                                bool AllowAACtxI) const {
8175     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8176       return false;
8177 
8178     // Our context might be in a different function; no intra-procedural
8179     // analysis (neither ScalarEvolution nor LazyValueInfo) can handle that.
8180     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8181       return false;
8182 
8183     // If the context is not dominated by the value there are paths to the
8184     // context that do not define the value. This cannot be handled by
8185     // LazyValueInfo so we need to bail.
8186     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8187       InformationCache &InfoCache = A.getInfoCache();
8188       const DominatorTree *DT =
8189           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8190               *I->getFunction());
8191       return DT && DT->dominates(I, CtxI);
8192     }
8193 
8194     return true;
8195   }
8196 
8197   /// See AAValueConstantRange::getKnownConstantRange(..).
8198   ConstantRange
8199   getKnownConstantRange(Attributor &A,
8200                         const Instruction *CtxI = nullptr) const override {
8201     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8202                                                  /* AllowAACtxI */ false))
8203       return getKnown();
8204 
8205     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8206     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8207     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8208   }
8209 
8210   /// See AAValueConstantRange::getAssumedConstantRange(..).
8211   ConstantRange
8212   getAssumedConstantRange(Attributor &A,
8213                           const Instruction *CtxI = nullptr) const override {
8214     // TODO: Make SCEV use Attributor assumption.
8215     //       We may be able to bound a variable range via assumptions in
8216     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8217     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8218     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8219                                                  /* AllowAACtxI */ false))
8220       return getAssumed();
8221 
8222     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8223     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8224     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8225   }
8226 
8227   /// Helper function to create MDNode for range metadata.
8228   static MDNode *
8229   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8230                             const ConstantRange &AssumedConstantRange) {
8231     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8232                                   Ty, AssumedConstantRange.getLower())),
8233                               ConstantAsMetadata::get(ConstantInt::get(
8234                                   Ty, AssumedConstantRange.getUpper()))};
8235     return MDNode::get(Ctx, LowAndHigh);
8236   }
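
  // For example (a sketch): an assumed range [0, 10) on an i32 value is
  // encoded as the two-operand node
  //
  //   !range !{i32 0, i32 10}
  //
  // where the operands are the half-open [Lower, Upper) bounds of the range.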
8237 
8238   /// Return true if \p Assumed is included in \p KnownRanges.
8239   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8240 
8241     if (Assumed.isFullSet())
8242       return false;
8243 
8244     if (!KnownRanges)
8245       return true;
8246 
8247     // If multiple ranges are annotated in the IR, we give up on annotating
8248     // the assumed range for now.
8249 
8250     // TODO: If there exists a known range which contains the assumed range,
8251     // we can say the assumed range is better.
8252     if (KnownRanges->getNumOperands() > 2)
8253       return false;
8254 
8255     ConstantInt *Lower =
8256         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8257     ConstantInt *Upper =
8258         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8259 
8260     ConstantRange Known(Lower->getValue(), Upper->getValue());
8261     return Known.contains(Assumed) && Known != Assumed;
8262   }
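
  // E.g., an assumed range [2, 5) improves on existing metadata
  // !range !{i32 0, i32 10}, because [0, 10) strictly contains [2, 5); a
  // full-set assumed range or a multi-range annotation never counts as
  // better.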
8263 
8264   /// Helper function to set range metadata.
8265   static bool
8266   setRangeMetadataIfisBetterRange(Instruction *I,
8267                                   const ConstantRange &AssumedConstantRange) {
8268     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8269     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8270       if (!AssumedConstantRange.isEmptySet()) {
8271         I->setMetadata(LLVMContext::MD_range,
8272                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8273                                                  AssumedConstantRange));
8274         return true;
8275       }
8276     }
8277     return false;
8278   }
8279 
8280   /// See AbstractAttribute::manifest()
8281   ChangeStatus manifest(Attributor &A) override {
8282     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8283     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8284     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8285 
8286     auto &V = getAssociatedValue();
8287     if (!AssumedConstantRange.isEmptySet() &&
8288         !AssumedConstantRange.isSingleElement()) {
8289       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8290         assert(I == getCtxI() && "Should not annotate an instruction which is "
8291                                  "not the context instruction");
8292         if (isa<CallInst>(I) || isa<LoadInst>(I))
8293           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8294             Changed = ChangeStatus::CHANGED;
8295       }
8296     }
8297 
8298     return Changed;
8299   }
8300 };
8301 
8302 struct AAValueConstantRangeArgument final
8303     : AAArgumentFromCallSiteArguments<
8304           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8305           true /* BridgeCallBaseContext */> {
8306   using Base = AAArgumentFromCallSiteArguments<
8307       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8308       true /* BridgeCallBaseContext */>;
8309   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8310       : Base(IRP, A) {}
8311 
8312   /// See AbstractAttribute::initialize(..).
8313   void initialize(Attributor &A) override {
8314     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8315       indicatePessimisticFixpoint();
8316     } else {
8317       Base::initialize(A);
8318     }
8319   }
8320 
8321   /// See AbstractAttribute::trackStatistics()
8322   void trackStatistics() const override {
8323     STATS_DECLTRACK_ARG_ATTR(value_range)
8324   }
8325 };
8326 
8327 struct AAValueConstantRangeReturned
8328     : AAReturnedFromReturnedValues<AAValueConstantRange,
8329                                    AAValueConstantRangeImpl,
8330                                    AAValueConstantRangeImpl::StateType,
8331                                    /* PropagateCallBaseContext */ true> {
8332   using Base =
8333       AAReturnedFromReturnedValues<AAValueConstantRange,
8334                                    AAValueConstantRangeImpl,
8335                                    AAValueConstantRangeImpl::StateType,
8336                                    /* PropagateCallBaseContext */ true>;
8337   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8338       : Base(IRP, A) {}
8339 
8340   /// See AbstractAttribute::initialize(...).
8341   void initialize(Attributor &A) override {}
8342 
8343   /// See AbstractAttribute::trackStatistics()
8344   void trackStatistics() const override {
8345     STATS_DECLTRACK_FNRET_ATTR(value_range)
8346   }
8347 };
8348 
8349 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8350   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8351       : AAValueConstantRangeImpl(IRP, A) {}
8352 
8353   /// See AbstractAttribute::initialize(...).
8354   void initialize(Attributor &A) override {
8355     AAValueConstantRangeImpl::initialize(A);
8356     if (isAtFixpoint())
8357       return;
8358 
8359     Value &V = getAssociatedValue();
8360 
8361     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8362       unionAssumed(ConstantRange(C->getValue()));
8363       indicateOptimisticFixpoint();
8364       return;
8365     }
8366 
8367     if (isa<UndefValue>(&V)) {
8368       // Collapse the undef state to 0.
8369       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8370       indicateOptimisticFixpoint();
8371       return;
8372     }
8373 
8374     if (isa<CallBase>(&V))
8375       return;
8376 
8377     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8378       return;
8379 
8380     // If it is a load instruction with range metadata, use it.
8381     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8382       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8383         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8384         return;
8385       }
8386 
8387     // We can work with PHI and select instructions as we traverse their operands
8388     // during update.
8389     if (isa<SelectInst>(V) || isa<PHINode>(V))
8390       return;
8391 
8392     // Otherwise we give up.
8393     indicatePessimisticFixpoint();
8394 
8395     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8396                       << getAssociatedValue() << "\n");
8397   }
8398 
8399   bool calculateBinaryOperator(
8400       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8401       const Instruction *CtxI,
8402       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8403     Value *LHS = BinOp->getOperand(0);
8404     Value *RHS = BinOp->getOperand(1);
8405 
8406     // Simplify the operands first.
8407     bool UsedAssumedInformation = false;
8408     const auto &SimplifiedLHS =
8409         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8410                                *this, UsedAssumedInformation);
8411     if (!SimplifiedLHS.hasValue())
8412       return true;
8413     if (!SimplifiedLHS.getValue())
8414       return false;
8415     LHS = *SimplifiedLHS;
8416 
8417     const auto &SimplifiedRHS =
8418         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8419                                *this, UsedAssumedInformation);
8420     if (!SimplifiedRHS.hasValue())
8421       return true;
8422     if (!SimplifiedRHS.getValue())
8423       return false;
8424     RHS = *SimplifiedRHS;
8425 
8426     // TODO: Allow non-integers as well.
8427     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8428       return false;
8429 
8430     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8431         *this, IRPosition::value(*LHS, getCallBaseContext()),
8432         DepClassTy::REQUIRED);
8433     QuerriedAAs.push_back(&LHSAA);
8434     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8435 
8436     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8437         *this, IRPosition::value(*RHS, getCallBaseContext()),
8438         DepClassTy::REQUIRED);
8439     QuerriedAAs.push_back(&RHSAA);
8440     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8441 
8442     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8443 
8444     T.unionAssumed(AssumedRange);
8445 
8446     // TODO: Track a known state too.
8447 
8448     return T.isValidState();
8449   }
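
  // Sketch of the range arithmetic involved: for %r = add i32 %x, %y with
  // assumed ranges [0, 4) for %x and [1, 2) for %y,
  // LHSAARange.binaryOp(Instruction::Add, RHSAARange) yields [1, 5), which
  // is then unioned into T.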
8450 
8451   bool calculateCastInst(
8452       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8453       const Instruction *CtxI,
8454       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8455     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8456     // TODO: Allow non-integers as well.
8457     Value *OpV = CastI->getOperand(0);
8458 
8459     // Simplify the operand first.
8460     bool UsedAssumedInformation = false;
8461     const auto &SimplifiedOpV =
8462         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8463                                *this, UsedAssumedInformation);
8464     if (!SimplifiedOpV.hasValue())
8465       return true;
8466     if (!SimplifiedOpV.getValue())
8467       return false;
8468     OpV = *SimplifiedOpV;
8469 
8470     if (!OpV->getType()->isIntegerTy())
8471       return false;
8472 
8473     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8474         *this, IRPosition::value(*OpV, getCallBaseContext()),
8475         DepClassTy::REQUIRED);
8476     QuerriedAAs.push_back(&OpAA);
8477     T.unionAssumed(
8478         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8479     return T.isValidState();
8480   }
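
  // E.g., for %z = zext i8 %v to i32 with an assumed 8-bit range [0, 16) on
  // %v, castOp(Instruction::ZExt, 32) produces the 32-bit range [0, 16).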
8481 
8482   bool
8483   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8484                    const Instruction *CtxI,
8485                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8486     Value *LHS = CmpI->getOperand(0);
8487     Value *RHS = CmpI->getOperand(1);
8488 
8489     // Simplify the operands first.
8490     bool UsedAssumedInformation = false;
8491     const auto &SimplifiedLHS =
8492         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8493                                *this, UsedAssumedInformation);
8494     if (!SimplifiedLHS.hasValue())
8495       return true;
8496     if (!SimplifiedLHS.getValue())
8497       return false;
8498     LHS = *SimplifiedLHS;
8499 
8500     const auto &SimplifiedRHS =
8501         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8502                                *this, UsedAssumedInformation);
8503     if (!SimplifiedRHS.hasValue())
8504       return true;
8505     if (!SimplifiedRHS.getValue())
8506       return false;
8507     RHS = *SimplifiedRHS;
8508 
8509     // TODO: Allow non-integers as well.
8510     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8511       return false;
8512 
8513     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8514         *this, IRPosition::value(*LHS, getCallBaseContext()),
8515         DepClassTy::REQUIRED);
8516     QuerriedAAs.push_back(&LHSAA);
8517     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8518         *this, IRPosition::value(*RHS, getCallBaseContext()),
8519         DepClassTy::REQUIRED);
8520     QuerriedAAs.push_back(&RHSAA);
8521     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8522     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8523 
8524     // If one of them is empty set, we can't decide.
8525     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8526       return true;
8527 
8528     bool MustTrue = false, MustFalse = false;
8529 
8530     auto AllowedRegion =
8531         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8532 
8533     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8534       MustFalse = true;
8535 
8536     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8537       MustTrue = true;
8538 
8539     assert((!MustTrue || !MustFalse) &&
8540            "Either MustTrue or MustFalse should be false!");
8541 
8542     if (MustTrue)
8543       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8544     else if (MustFalse)
8545       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8546     else
8547       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8548 
8549     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8550                       << " " << RHSAA << "\n");
8551 
8552     // TODO: Track a known state too.
8553     return T.isValidState();
8554   }
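
  // Worked example (illustrative values): for %c = icmp ult i32 %x, %y with
  // LHSAARange = [0, 10) and RHSAARange = [10, 20), every value pair
  // satisfies the predicate, so MustTrue holds and T collapses to the 1-bit
  // singleton {1}.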
8555 
8556   /// See AbstractAttribute::updateImpl(...).
8557   ChangeStatus updateImpl(Attributor &A) override {
8558     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8559                             IntegerRangeState &T, bool Stripped) -> bool {
8560       Instruction *I = dyn_cast<Instruction>(&V);
8561       if (!I || isa<CallBase>(I)) {
8562 
8563         // Simplify the operand first.
8564         bool UsedAssumedInformation = false;
8565         const auto &SimplifiedOpV =
8566             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8567                                    *this, UsedAssumedInformation);
8568         if (!SimplifiedOpV.hasValue())
8569           return true;
8570         if (!SimplifiedOpV.getValue())
8571           return false;
8572         Value *VPtr = *SimplifiedOpV;
8573 
8574         // If the value is not an instruction, we query an AA from the Attributor.
8575         const auto &AA = A.getAAFor<AAValueConstantRange>(
8576             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8577             DepClassTy::REQUIRED);
8578 
8579         // We avoid the clamp operator so that the program point CtxI is used.
8580         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8581 
8582         return T.isValidState();
8583       }
8584 
8585       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8586       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8587         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8588           return false;
8589       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8590         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8591           return false;
8592       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8593         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8594           return false;
8595       } else {
8596         // Give up with other instructions.
8597         // TODO: Add other instructions
8598 
8599         T.indicatePessimisticFixpoint();
8600         return false;
8601       }
8602 
8603       // Catch circular reasoning in a pessimistic way for now.
8604       // TODO: Check how the range evolves and if we stripped anything, see also
8605       //       AADereferenceable or AAAlign for similar situations.
8606       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8607         if (QueriedAA != this)
8608           continue;
8609         // If we are in a steady state we do not need to worry.
8610         if (T.getAssumed() == getState().getAssumed())
8611           continue;
8612         T.indicatePessimisticFixpoint();
8613       }
8614 
8615       return T.isValidState();
8616     };
8617 
8618     IntegerRangeState T(getBitWidth());
8619 
8620     bool UsedAssumedInformation = false;
8621     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8622                                                   VisitValueCB, getCtxI(),
8623                                                   UsedAssumedInformation,
8624                                                   /* UseValueSimplify */ false))
8625       return indicatePessimisticFixpoint();
8626 
8627     // Ensure that long def-use chains can't cause circular reasoning either by
8628     // introducing a cutoff below.
8629     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8630       return ChangeStatus::UNCHANGED;
8631     if (++NumChanges > MaxNumChanges) {
8632       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8633                         << " changes but only " << MaxNumChanges
8634                         << " are allowed to avoid cyclic reasoning.\n");
8635       return indicatePessimisticFixpoint();
8636     }
8637     return ChangeStatus::CHANGED;
8638   }
8639 
8640   /// See AbstractAttribute::trackStatistics()
8641   void trackStatistics() const override {
8642     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8643   }
8644 
8645   /// Tracker to bail after too many widening steps of the constant range.
8646   int NumChanges = 0;
8647 
8648   /// Upper bound for the number of allowed changes (=widening steps) for the
8649   /// constant range before we give up.
8650   static constexpr int MaxNumChanges = 5;
8651 };
8652 
8653 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8654   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8655       : AAValueConstantRangeImpl(IRP, A) {}
8656 
8657   /// See AbstractAttribute::updateImpl(...).
8658   ChangeStatus updateImpl(Attributor &A) override {
8659     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8660                      "not be called");
8661   }
8662 
8663   /// See AbstractAttribute::trackStatistics()
8664   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8665 };
8666 
8667 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8668   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8669       : AAValueConstantRangeFunction(IRP, A) {}
8670 
8671   /// See AbstractAttribute::trackStatistics()
8672   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8673 };
8674 
8675 struct AAValueConstantRangeCallSiteReturned
8676     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8677                                      AAValueConstantRangeImpl,
8678                                      AAValueConstantRangeImpl::StateType,
8679                                      /* IntroduceCallBaseContext */ true> {
8680   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8681       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8682                                        AAValueConstantRangeImpl,
8683                                        AAValueConstantRangeImpl::StateType,
8684                                        /* IntroduceCallBaseContext */ true>(IRP,
8685                                                                             A) {
8686   }
8687 
8688   /// See AbstractAttribute::initialize(...).
8689   void initialize(Attributor &A) override {
8690     // If it is a call instruction with range metadata, use the metadata.
8691     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8692       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8693         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8694 
8695     AAValueConstantRangeImpl::initialize(A);
8696   }
8697 
8698   /// See AbstractAttribute::trackStatistics()
8699   void trackStatistics() const override {
8700     STATS_DECLTRACK_CSRET_ATTR(value_range)
8701   }
8702 };
8703 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8704   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8705       : AAValueConstantRangeFloating(IRP, A) {}
8706 
8707   /// See AbstractAttribute::manifest()
8708   ChangeStatus manifest(Attributor &A) override {
8709     return ChangeStatus::UNCHANGED;
8710   }
8711 
8712   /// See AbstractAttribute::trackStatistics()
8713   void trackStatistics() const override {
8714     STATS_DECLTRACK_CSARG_ATTR(value_range)
8715   }
8716 };
8717 
8718 /// ------------------ Potential Values Attribute -------------------------
8719 
8720 struct AAPotentialValuesImpl : AAPotentialValues {
8721   using StateType = PotentialConstantIntValuesState;
8722 
8723   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8724       : AAPotentialValues(IRP, A) {}
8725 
8726   /// See AbstractAttribute::initialize(..).
8727   void initialize(Attributor &A) override {
8728     if (A.hasSimplificationCallback(getIRPosition()))
8729       indicatePessimisticFixpoint();
8730     else
8731       AAPotentialValues::initialize(A);
8732   }
8733 
8734   /// See AbstractAttribute::getAsStr().
8735   const std::string getAsStr() const override {
8736     std::string Str;
8737     llvm::raw_string_ostream OS(Str);
8738     OS << getState();
8739     return OS.str();
8740   }
8741 
8742   /// See AbstractAttribute::updateImpl(...).
8743   ChangeStatus updateImpl(Attributor &A) override {
8744     return indicatePessimisticFixpoint();
8745   }
8746 };
8747 
8748 struct AAPotentialValuesArgument final
8749     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8750                                       PotentialConstantIntValuesState> {
8751   using Base =
8752       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8753                                       PotentialConstantIntValuesState>;
8754   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8755       : Base(IRP, A) {}
8756 
8757   /// See AbstractAttribute::initialize(..).
8758   void initialize(Attributor &A) override {
8759     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8760       indicatePessimisticFixpoint();
8761     } else {
8762       Base::initialize(A);
8763     }
8764   }
8765 
8766   /// See AbstractAttribute::trackStatistics()
8767   void trackStatistics() const override {
8768     STATS_DECLTRACK_ARG_ATTR(potential_values)
8769   }
8770 };
8771 
8772 struct AAPotentialValuesReturned
8773     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8774   using Base =
8775       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8776   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8777       : Base(IRP, A) {}
8778 
8779   /// See AbstractAttribute::trackStatistics()
8780   void trackStatistics() const override {
8781     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8782   }
8783 };
8784 
8785 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8786   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8787       : AAPotentialValuesImpl(IRP, A) {}
8788 
8789   /// See AbstractAttribute::initialize(..).
8790   void initialize(Attributor &A) override {
8791     AAPotentialValuesImpl::initialize(A);
8792     if (isAtFixpoint())
8793       return;
8794 
8795     Value &V = getAssociatedValue();
8796 
8797     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8798       unionAssumed(C->getValue());
8799       indicateOptimisticFixpoint();
8800       return;
8801     }
8802 
8803     if (isa<UndefValue>(&V)) {
8804       unionAssumedWithUndef();
8805       indicateOptimisticFixpoint();
8806       return;
8807     }
8808 
8809     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8810       return;
8811 
8812     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8813       return;
8814 
8815     indicatePessimisticFixpoint();
8816 
8817     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8818                       << getAssociatedValue() << "\n");
8819   }
8820 
8821   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8822                                 const APInt &RHS) {
8823     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
8824   }
8825 
8826   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8827                                  uint32_t ResultBitWidth) {
8828     Instruction::CastOps CastOp = CI->getOpcode();
8829     switch (CastOp) {
8830     default:
8831       llvm_unreachable("unsupported or not integer cast");
8832     case Instruction::Trunc:
8833       return Src.trunc(ResultBitWidth);
8834     case Instruction::SExt:
8835       return Src.sext(ResultBitWidth);
8836     case Instruction::ZExt:
8837       return Src.zext(ResultBitWidth);
8838     case Instruction::BitCast:
8839       return Src;
8840     }
8841   }
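
  // E.g., for %t = trunc i32 %x to i8 and a potential value of 257,
  // Src.trunc(8) yields 1, i.e., 257 modulo 2^8.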
8842 
8843   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8844                                        const APInt &LHS, const APInt &RHS,
8845                                        bool &SkipOperation, bool &Unsupported) {
8846     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8847     // Unsupported is set to true when the binary operator is not supported.
8848     // SkipOperation is set to true when UB occurs with the given operand pair
8849     // (LHS, RHS).
8850     // TODO: we should look at nsw and nuw keywords to handle operations
8851     //       that create poison or undef value.
8852     switch (BinOpcode) {
8853     default:
8854       Unsupported = true;
8855       return LHS;
8856     case Instruction::Add:
8857       return LHS + RHS;
8858     case Instruction::Sub:
8859       return LHS - RHS;
8860     case Instruction::Mul:
8861       return LHS * RHS;
8862     case Instruction::UDiv:
8863       if (RHS.isZero()) {
8864         SkipOperation = true;
8865         return LHS;
8866       }
8867       return LHS.udiv(RHS);
8868     case Instruction::SDiv:
8869       if (RHS.isZero()) {
8870         SkipOperation = true;
8871         return LHS;
8872       }
8873       return LHS.sdiv(RHS);
8874     case Instruction::URem:
8875       if (RHS.isZero()) {
8876         SkipOperation = true;
8877         return LHS;
8878       }
8879       return LHS.urem(RHS);
8880     case Instruction::SRem:
8881       if (RHS.isZero()) {
8882         SkipOperation = true;
8883         return LHS;
8884       }
8885       return LHS.srem(RHS);
8886     case Instruction::Shl:
8887       return LHS.shl(RHS);
8888     case Instruction::LShr:
8889       return LHS.lshr(RHS);
8890     case Instruction::AShr:
8891       return LHS.ashr(RHS);
8892     case Instruction::And:
8893       return LHS & RHS;
8894     case Instruction::Or:
8895       return LHS | RHS;
8896     case Instruction::Xor:
8897       return LHS ^ RHS;
8898     }
8899   }
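
  // E.g., for %q = udiv i32 %x, %y the operand pair (L, 0) sets
  // SkipOperation: dividing by zero would be immediate UB, so that pair
  // contributes no potential value.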
8900 
8901   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
8902                                            const APInt &LHS, const APInt &RHS) {
8903     bool SkipOperation = false;
8904     bool Unsupported = false;
8905     APInt Result =
8906         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
8907     if (Unsupported)
8908       return false;
8909     // If SkipOperation is true, we can ignore this operand pair (L, R).
8910     if (!SkipOperation)
8911       unionAssumed(Result);
8912     return isValidState();
8913   }
8914 
8915   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
8916     auto AssumedBefore = getAssumed();
8917     Value *LHS = ICI->getOperand(0);
8918     Value *RHS = ICI->getOperand(1);
8919 
8920     // Simplify the operands first.
8921     bool UsedAssumedInformation = false;
8922     const auto &SimplifiedLHS =
8923         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8924                                *this, UsedAssumedInformation);
8925     if (!SimplifiedLHS.hasValue())
8926       return ChangeStatus::UNCHANGED;
8927     if (!SimplifiedLHS.getValue())
8928       return indicatePessimisticFixpoint();
8929     LHS = *SimplifiedLHS;
8930 
8931     const auto &SimplifiedRHS =
8932         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8933                                *this, UsedAssumedInformation);
8934     if (!SimplifiedRHS.hasValue())
8935       return ChangeStatus::UNCHANGED;
8936     if (!SimplifiedRHS.getValue())
8937       return indicatePessimisticFixpoint();
8938     RHS = *SimplifiedRHS;
8939 
8940     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8941       return indicatePessimisticFixpoint();
8942 
8943     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8944                                                 DepClassTy::REQUIRED);
8945     if (!LHSAA.isValidState())
8946       return indicatePessimisticFixpoint();
8947 
8948     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8949                                                 DepClassTy::REQUIRED);
8950     if (!RHSAA.isValidState())
8951       return indicatePessimisticFixpoint();
8952 
8953     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8954     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8955 
8956     // TODO: make use of the undef flag to limit potential values aggressively.
8957     bool MaybeTrue = false, MaybeFalse = false;
8958     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
8959     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
8960       // The result of any comparison between undefs can be soundly replaced
8961       // with undef.
8962       unionAssumedWithUndef();
8963     } else if (LHSAA.undefIsContained()) {
8964       for (const APInt &R : RHSAAPVS) {
8965         bool CmpResult = calculateICmpInst(ICI, Zero, R);
8966         MaybeTrue |= CmpResult;
8967         MaybeFalse |= !CmpResult;
8968         if (MaybeTrue & MaybeFalse)
8969           return indicatePessimisticFixpoint();
8970       }
8971     } else if (RHSAA.undefIsContained()) {
8972       for (const APInt &L : LHSAAPVS) {
8973         bool CmpResult = calculateICmpInst(ICI, L, Zero);
8974         MaybeTrue |= CmpResult;
8975         MaybeFalse |= !CmpResult;
8976         if (MaybeTrue & MaybeFalse)
8977           return indicatePessimisticFixpoint();
8978       }
8979     } else {
8980       for (const APInt &L : LHSAAPVS) {
8981         for (const APInt &R : RHSAAPVS) {
8982           bool CmpResult = calculateICmpInst(ICI, L, R);
8983           MaybeTrue |= CmpResult;
8984           MaybeFalse |= !CmpResult;
8985           if (MaybeTrue & MaybeFalse)
8986             return indicatePessimisticFixpoint();
8987         }
8988       }
8989     }
8990     if (MaybeTrue)
8991       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
8992     if (MaybeFalse)
8993       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
8994     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8995                                          : ChangeStatus::CHANGED;
8996   }
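
  // Sketch: for %c = icmp eq i32 %a, %b with assumed sets {1, 2} for %a and
  // {3} for %b, every pair compares unequal, so only MaybeFalse is set and
  // the assumed set of %c becomes {0}.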
8997 
8998   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
8999     auto AssumedBefore = getAssumed();
9000     Value *LHS = SI->getTrueValue();
9001     Value *RHS = SI->getFalseValue();
9002 
9003     // Simplify the operands first.
9004     bool UsedAssumedInformation = false;
9005     const auto &SimplifiedLHS =
9006         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9007                                *this, UsedAssumedInformation);
9008     if (!SimplifiedLHS.hasValue())
9009       return ChangeStatus::UNCHANGED;
9010     if (!SimplifiedLHS.getValue())
9011       return indicatePessimisticFixpoint();
9012     LHS = *SimplifiedLHS;
9013 
9014     const auto &SimplifiedRHS =
9015         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9016                                *this, UsedAssumedInformation);
9017     if (!SimplifiedRHS.hasValue())
9018       return ChangeStatus::UNCHANGED;
9019     if (!SimplifiedRHS.getValue())
9020       return indicatePessimisticFixpoint();
9021     RHS = *SimplifiedRHS;
9022 
9023     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9024       return indicatePessimisticFixpoint();
9025 
9026     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
9027                                                   UsedAssumedInformation);
9028 
9029     // Check if we only need one operand.
9030     bool OnlyLeft = false, OnlyRight = false;
9031     if (C.hasValue() && *C && (*C)->isOneValue())
9032       OnlyLeft = true;
9033     else if (C.hasValue() && *C && (*C)->isZeroValue())
9034       OnlyRight = true;
9035 
9036     const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr;
9037     if (!OnlyRight) {
9038       LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
9039                                              DepClassTy::REQUIRED);
9040       if (!LHSAA->isValidState())
9041         return indicatePessimisticFixpoint();
9042     }
9043     if (!OnlyLeft) {
9044       RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
9045                                              DepClassTy::REQUIRED);
9046       if (!RHSAA->isValidState())
9047         return indicatePessimisticFixpoint();
9048     }
9049 
9050     if (!LHSAA || !RHSAA) {
9051       // select (true/false), lhs, rhs
9052       auto *OpAA = LHSAA ? LHSAA : RHSAA;
9053 
9054       if (OpAA->undefIsContained())
9055         unionAssumedWithUndef();
9056       else
9057         unionAssumed(*OpAA);
9058 
9059     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
9060       // select i1 *, undef, undef => undef
9061       unionAssumedWithUndef();
9062     } else {
9063       unionAssumed(*LHSAA);
9064       unionAssumed(*RHSAA);
9065     }
9066     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9067                                          : ChangeStatus::CHANGED;
9068   }
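
  // Sketch: for %s = select i1 %c, i32 %x, i32 %y with an unknown condition,
  // the assumed set of %s is the union of the operand sets; if %c simplifies
  // to a constant, only the selected operand contributes.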
9069 
9070   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
9071     auto AssumedBefore = getAssumed();
9072     if (!CI->isIntegerCast())
9073       return indicatePessimisticFixpoint();
9074     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
9075     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
9076     Value *Src = CI->getOperand(0);
9077 
9078     // Simplify the operand first.
9079     bool UsedAssumedInformation = false;
9080     const auto &SimplifiedSrc =
9081         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
9082                                *this, UsedAssumedInformation);
9083     if (!SimplifiedSrc.hasValue())
9084       return ChangeStatus::UNCHANGED;
9085     if (!SimplifiedSrc.getValue())
9086       return indicatePessimisticFixpoint();
9087     Src = *SimplifiedSrc;
9088 
9089     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
9090                                                 DepClassTy::REQUIRED);
9091     if (!SrcAA.isValidState())
9092       return indicatePessimisticFixpoint();
9093     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
9094     if (SrcAA.undefIsContained())
9095       unionAssumedWithUndef();
9096     else {
9097       for (const APInt &S : SrcAAPVS) {
9098         APInt T = calculateCastInst(CI, S, ResultBitWidth);
9099         unionAssumed(T);
9100       }
9101     }
9102     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9103                                          : ChangeStatus::CHANGED;
9104   }
9105 
9106   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
9107     auto AssumedBefore = getAssumed();
9108     Value *LHS = BinOp->getOperand(0);
9109     Value *RHS = BinOp->getOperand(1);
9110 
9111     // Simplify the operands first.
9112     bool UsedAssumedInformation = false;
9113     const auto &SimplifiedLHS =
9114         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9115                                *this, UsedAssumedInformation);
9116     if (!SimplifiedLHS.hasValue())
9117       return ChangeStatus::UNCHANGED;
9118     if (!SimplifiedLHS.getValue())
9119       return indicatePessimisticFixpoint();
9120     LHS = *SimplifiedLHS;
9121 
9122     const auto &SimplifiedRHS =
9123         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9124                                *this, UsedAssumedInformation);
9125     if (!SimplifiedRHS.hasValue())
9126       return ChangeStatus::UNCHANGED;
9127     if (!SimplifiedRHS.getValue())
9128       return indicatePessimisticFixpoint();
9129     RHS = *SimplifiedRHS;
9130 
9131     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9132       return indicatePessimisticFixpoint();
9133 
9134     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
9135                                                 DepClassTy::REQUIRED);
9136     if (!LHSAA.isValidState())
9137       return indicatePessimisticFixpoint();
9138 
9139     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
9140                                                 DepClassTy::REQUIRED);
9141     if (!RHSAA.isValidState())
9142       return indicatePessimisticFixpoint();
9143 
9144     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
9145     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
9146     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9147 
9148     // TODO: make use of the undef flag to limit potential values aggressively.
9149     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9150       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9151         return indicatePessimisticFixpoint();
9152     } else if (LHSAA.undefIsContained()) {
9153       for (const APInt &R : RHSAAPVS) {
9154         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9155           return indicatePessimisticFixpoint();
9156       }
9157     } else if (RHSAA.undefIsContained()) {
9158       for (const APInt &L : LHSAAPVS) {
9159         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9160           return indicatePessimisticFixpoint();
9161       }
9162     } else {
9163       for (const APInt &L : LHSAAPVS) {
9164         for (const APInt &R : RHSAAPVS) {
9165           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9166             return indicatePessimisticFixpoint();
9167         }
9168       }
9169     }
9170     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9171                                          : ChangeStatus::CHANGED;
9172   }
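
  // Sketch: for %r = add i32 %a, %b with assumed sets {0, 1} and {2}, the
  // pairwise results {2, 3} are unioned into the state, subject to the
  // MaxPotentialValues cutoff on the set size.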
9173 
9174   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9175     auto AssumedBefore = getAssumed();
9176     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9177       Value *IncomingValue = PHI->getIncomingValue(u);
9178 
9179       // Simplify the operand first.
9180       bool UsedAssumedInformation = false;
9181       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9182           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9183           UsedAssumedInformation);
9184       if (!SimplifiedIncomingValue.hasValue())
9185         continue;
9186       if (!SimplifiedIncomingValue.getValue())
9187         return indicatePessimisticFixpoint();
9188       IncomingValue = *SimplifiedIncomingValue;
9189 
9190       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
9191           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9192       if (!PotentialValuesAA.isValidState())
9193         return indicatePessimisticFixpoint();
9194       if (PotentialValuesAA.undefIsContained())
9195         unionAssumedWithUndef();
9196       else
9197         unionAssumed(PotentialValuesAA.getAssumed());
9198     }
9199     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9200                                          : ChangeStatus::CHANGED;
9201   }
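
  // Sketch: for %p = phi i32 [ 0, %bb0 ], [ 7, %bb1 ] the assumed set
  // becomes {0, 7}; an undef incoming value flips the undef flag instead of
  // adding a concrete value.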
9202 
9203   ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) {
9204     if (!L.getType()->isIntegerTy())
9205       return indicatePessimisticFixpoint();
9206 
9207     auto Union = [&](Value &V) {
9208       if (isa<UndefValue>(V)) {
9209         unionAssumedWithUndef();
9210         return true;
9211       }
9212       if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) {
9213         unionAssumed(CI->getValue());
9214         return true;
9215       }
9216       return false;
9217     };
9218     auto AssumedBefore = getAssumed();
9219 
9220     if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union))
9221       return indicatePessimisticFixpoint();
9222 
9223     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9224                                          : ChangeStatus::CHANGED;
9225   }
9226 
9227   /// See AbstractAttribute::updateImpl(...).
9228   ChangeStatus updateImpl(Attributor &A) override {
9229     Value &V = getAssociatedValue();
9230     Instruction *I = dyn_cast<Instruction>(&V);
9231 
9232     if (auto *ICI = dyn_cast<ICmpInst>(I))
9233       return updateWithICmpInst(A, ICI);
9234 
9235     if (auto *SI = dyn_cast<SelectInst>(I))
9236       return updateWithSelectInst(A, SI);
9237 
9238     if (auto *CI = dyn_cast<CastInst>(I))
9239       return updateWithCastInst(A, CI);
9240 
9241     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9242       return updateWithBinaryOperator(A, BinOp);
9243 
9244     if (auto *PHI = dyn_cast<PHINode>(I))
9245       return updateWithPHINode(A, PHI);
9246 
9247     if (auto *L = dyn_cast<LoadInst>(I))
9248       return updateWithLoad(A, *L);
9249 
9250     return indicatePessimisticFixpoint();
9251   }
9252 
9253   /// See AbstractAttribute::trackStatistics()
9254   void trackStatistics() const override {
9255     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9256   }
9257 };
9258 
9259 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
9260   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
9261       : AAPotentialValuesImpl(IRP, A) {}
9262 
9263   /// See AbstractAttribute::updateImpl(...).
9264   ChangeStatus updateImpl(Attributor &A) override {
9265     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
9266                      "not be called");
9267   }
9268 
9269   /// See AbstractAttribute::trackStatistics()
9270   void trackStatistics() const override {
9271     STATS_DECLTRACK_FN_ATTR(potential_values)
9272   }
9273 };
9274 
9275 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
9276   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
9277       : AAPotentialValuesFunction(IRP, A) {}
9278 
9279   /// See AbstractAttribute::trackStatistics()
9280   void trackStatistics() const override {
9281     STATS_DECLTRACK_CS_ATTR(potential_values)
9282   }
9283 };
9284 
9285 struct AAPotentialValuesCallSiteReturned
9286     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
9287   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
9288       : AACallSiteReturnedFromReturned<AAPotentialValues,
9289                                        AAPotentialValuesImpl>(IRP, A) {}
9290 
9291   /// See AbstractAttribute::trackStatistics()
9292   void trackStatistics() const override {
9293     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9294   }
9295 };
9296 
9297 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
9298   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
9299       : AAPotentialValuesFloating(IRP, A) {}
9300 
9301   /// See AbstractAttribute::initialize(...).
9302   void initialize(Attributor &A) override {
9303     AAPotentialValuesImpl::initialize(A);
9304     if (isAtFixpoint())
9305       return;
9306 
9307     Value &V = getAssociatedValue();
9308 
9309     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9310       unionAssumed(C->getValue());
9311       indicateOptimisticFixpoint();
9312       return;
9313     }
9314 
9315     if (isa<UndefValue>(&V)) {
9316       unionAssumedWithUndef();
9317       indicateOptimisticFixpoint();
9318       return;
9319     }
9320   }
9321 
9322   /// See AbstractAttribute::updateImpl(...).
9323   ChangeStatus updateImpl(Attributor &A) override {
9324     Value &V = getAssociatedValue();
9325     auto AssumedBefore = getAssumed();
9326     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
9327                                              DepClassTy::REQUIRED);
9328     const auto &S = AA.getAssumed();
9329     unionAssumed(S);
9330     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9331                                          : ChangeStatus::CHANGED;
9332   }
9333 
9334   /// See AbstractAttribute::trackStatistics()
9335   void trackStatistics() const override {
9336     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9337   }
9338 };
9339 
9340 /// ------------------------ NoUndef Attribute ---------------------------------
9341 struct AANoUndefImpl : AANoUndef {
9342   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9343 
9344   /// See AbstractAttribute::initialize(...).
9345   void initialize(Attributor &A) override {
9346     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9347       indicateOptimisticFixpoint();
9348       return;
9349     }
9350     Value &V = getAssociatedValue();
9351     if (isa<UndefValue>(V))
9352       indicatePessimisticFixpoint();
9353     else if (isa<FreezeInst>(V))
9354       indicateOptimisticFixpoint();
9355     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9356              isGuaranteedNotToBeUndefOrPoison(&V))
9357       indicateOptimisticFixpoint();
9358     else
9359       AANoUndef::initialize(A);
9360   }
9361 
9362   /// See followUsesInMBEC
9363   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9364                        AANoUndef::StateType &State) {
9365     const Value *UseV = U->get();
9366     const DominatorTree *DT = nullptr;
9367     AssumptionCache *AC = nullptr;
9368     InformationCache &InfoCache = A.getInfoCache();
9369     if (Function *F = getAnchorScope()) {
9370       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9371       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9372     }
9373     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9374     bool TrackUse = false;
9375     // Track use for instructions which must produce undef or poison bits when
9376     // at least one operand contains such bits.
9377     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9378       TrackUse = true;
9379     return TrackUse;
9380   }
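
  // Illustrative example for the use tracking above (hedged): in
  //
  //   %g = getelementptr i8, i8* %p, i64 4
  //
  // the result %g is poison whenever %p is, so the use of %p is worth
  // following when reasoning about noundef in the must-be-executed context.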
9381 
9382   /// See AbstractAttribute::getAsStr().
9383   const std::string getAsStr() const override {
9384     return getAssumed() ? "noundef" : "may-undef-or-poison";
9385   }
9386 
9387   ChangeStatus manifest(Attributor &A) override {
9388     // We don't manifest the noundef attribute for dead positions because the
9389     // associated values with dead positions would be replaced with undef
9390     // values.
9391     bool UsedAssumedInformation = false;
9392     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9393                         UsedAssumedInformation))
9394       return ChangeStatus::UNCHANGED;
9395     // A position whose simplified value does not have any value is
9396     // considered to be dead. We don't manifest noundef in such positions for
9397       // the same reason as above.
9398     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9399              .hasValue())
9400       return ChangeStatus::UNCHANGED;
9401     return AANoUndef::manifest(A);
9402   }
9403 };
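
// Illustrative example (hedged): initialize() above can conclude noundef
// without any fixpoint iteration in cases like
//
//   %f = freeze i32 %x   ; freeze never yields undef or poison
//
// where the optimistic fixpoint is indicated immediately, whereas a literal
// undef associated value pins the pessimistic fixpoint right away.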
9404 
9405 struct AANoUndefFloating : public AANoUndefImpl {
9406   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9407       : AANoUndefImpl(IRP, A) {}
9408 
9409   /// See AbstractAttribute::initialize(...).
9410   void initialize(Attributor &A) override {
9411     AANoUndefImpl::initialize(A);
9412     if (!getState().isAtFixpoint())
9413       if (Instruction *CtxI = getCtxI())
9414         followUsesInMBEC(*this, A, getState(), *CtxI);
9415   }
9416 
9417   /// See AbstractAttribute::updateImpl(...).
9418   ChangeStatus updateImpl(Attributor &A) override {
9419     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9420                             AANoUndef::StateType &T, bool Stripped) -> bool {
9421       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9422                                              DepClassTy::REQUIRED);
9423       if (!Stripped && this == &AA) {
9424         T.indicatePessimisticFixpoint();
9425       } else {
9426         const AANoUndef::StateType &S =
9427             static_cast<const AANoUndef::StateType &>(AA.getState());
9428         T ^= S;
9429       }
9430       return T.isValidState();
9431     };
9432 
9433     StateType T;
9434     bool UsedAssumedInformation = false;
9435     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9436                                           VisitValueCB, getCtxI(),
9437                                           UsedAssumedInformation))
9438       return indicatePessimisticFixpoint();
9439 
9440     return clampStateAndIndicateChange(getState(), T);
9441   }
9442 
9443   /// See AbstractAttribute::trackStatistics()
9444   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(noundef) }
9445 };
9446 
9447 struct AANoUndefReturned final
9448     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9449   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9450       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9451 
9452   /// See AbstractAttribute::trackStatistics()
9453   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9454 };
9455 
9456 struct AANoUndefArgument final
9457     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9458   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9459       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9460 
9461   /// See AbstractAttribute::trackStatistics()
9462   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9463 };
9464 
9465 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9466   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9467       : AANoUndefFloating(IRP, A) {}
9468 
9469   /// See AbstractAttribute::trackStatistics()
9470   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9471 };
9472 
9473 struct AANoUndefCallSiteReturned final
9474     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9475   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9476       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9477 
9478   /// See AbstractAttribute::trackStatistics()
9479   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9480 };
9481 
9482 struct AACallEdgesImpl : public AACallEdges {
9483   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9484 
9485   const SetVector<Function *> &getOptimisticEdges() const override {
9486     return CalledFunctions;
9487   }
9488 
9489   bool hasUnknownCallee() const override { return HasUnknownCallee; }
9490 
9491   bool hasNonAsmUnknownCallee() const override {
9492     return HasUnknownCalleeNonAsm;
9493   }
9494 
9495   const std::string getAsStr() const override {
9496     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9497            std::to_string(CalledFunctions.size()) + "]";
9498   }
9499 
9500   void trackStatistics() const override {}
9501 
9502 protected:
9503   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9504     if (CalledFunctions.insert(Fn)) {
9505       Change = ChangeStatus::CHANGED;
9506       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9507                         << "\n");
9508     }
9509   }
9510 
9511   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9512     if (!HasUnknownCallee)
9513       Change = ChangeStatus::CHANGED;
9514     if (NonAsm && !HasUnknownCalleeNonAsm)
9515       Change = ChangeStatus::CHANGED;
9516     HasUnknownCalleeNonAsm |= NonAsm;
9517     HasUnknownCallee = true;
9518   }
9519 
9520 private:
9521   /// Optimistic set of functions that might be called by this position.
9522   SetVector<Function *> CalledFunctions;
9523 
9524   /// Is there any call with an unknown callee.
9525   bool HasUnknownCallee = false;
9526 
9527   /// Is there any call with an unknown callee, excluding any inline asm.
9528   bool HasUnknownCalleeNonAsm = false;
9529 };
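
// A minimal sketch of the monotone state transition implemented by
// addCalledFunction() / setHasUnknownCallee() above; the types and names
// here are illustrative stand-ins, not the LLVM API:
//
//   #include <set>
//   #include <string>
//   struct CallEdgesState {
//     std::set<std::string> Callees; // stand-in for SetVector<Function *>
//     bool HasUnknownCallee = false;
//     // Returns true exactly when the state grew, mirroring how the real
//     // code flips ChangeStatus to CHANGED only on an actual insertion.
//     bool addCallee(const std::string &Fn) { return Callees.insert(Fn).second; }
//   };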
9530 
9531 struct AACallEdgesCallSite : public AACallEdgesImpl {
9532   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9533       : AACallEdgesImpl(IRP, A) {}
9534   /// See AbstractAttribute::updateImpl(...).
9535   ChangeStatus updateImpl(Attributor &A) override {
9536     ChangeStatus Change = ChangeStatus::UNCHANGED;
9537 
9538     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9539                           bool Stripped) -> bool {
9540       if (Function *Fn = dyn_cast<Function>(&V)) {
9541         addCalledFunction(Fn, Change);
9542       } else {
9543         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9544         setHasUnknownCallee(true, Change);
9545       }
9546 
9547       // Explore all values.
9548       return true;
9549     };
9550 
9551     // Process any value that we might call.
9552     auto ProcessCalledOperand = [&](Value *V) {
9553       bool DummyValue = false;
9554       bool UsedAssumedInformation = false;
9555       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9556                                        DummyValue, VisitValue, nullptr,
9557                                        UsedAssumedInformation, false)) {
9558         // If we haven't gone through all values, assume that there are unknown
9559         // callees.
9560         setHasUnknownCallee(true, Change);
9561       }
9562     };
9563 
9564     CallBase *CB = cast<CallBase>(getCtxI());
9565 
9566     if (CB->isInlineAsm()) {
9567       setHasUnknownCallee(false, Change);
9568       return Change;
9569     }
9570 
9571     // Process callee metadata if available.
9572     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9573       for (auto &Op : MD->operands()) {
9574         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9575         if (Callee)
9576           addCalledFunction(Callee, Change);
9577       }
9578       return Change;
9579     }
9580 
9581     // The simplest case.
9582     ProcessCalledOperand(CB->getCalledOperand());
9583 
9584     // Process callback functions.
9585     SmallVector<const Use *, 4u> CallbackUses;
9586     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9587     for (const Use *U : CallbackUses)
9588       ProcessCalledOperand(U->get());
9589 
9590     return Change;
9591   }
9592 };
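
// Example for the !callees path above (illustrative IR): an indirect call
// carrying callee metadata contributes exactly the listed functions as call
// edges, without setting the unknown-callee flag:
//
//   call void %fp(), !callees !0
//   ...
//   !0 = !{void ()* @f, void ()* @g}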
9593 
9594 struct AACallEdgesFunction : public AACallEdgesImpl {
9595   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9596       : AACallEdgesImpl(IRP, A) {}
9597 
9598   /// See AbstractAttribute::updateImpl(...).
9599   ChangeStatus updateImpl(Attributor &A) override {
9600     ChangeStatus Change = ChangeStatus::UNCHANGED;
9601 
9602     auto ProcessCallInst = [&](Instruction &Inst) {
9603       CallBase &CB = cast<CallBase>(Inst);
9604 
9605       auto &CBEdges = A.getAAFor<AACallEdges>(
9606           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9607       if (CBEdges.hasNonAsmUnknownCallee())
9608         setHasUnknownCallee(true, Change);
9609       if (CBEdges.hasUnknownCallee())
9610         setHasUnknownCallee(false, Change);
9611 
9612       for (Function *F : CBEdges.getOptimisticEdges())
9613         addCalledFunction(F, Change);
9614 
9615       return true;
9616     };
9617 
9618     // Visit all callable instructions.
9619     bool UsedAssumedInformation = false;
9620     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9621                                            UsedAssumedInformation,
9622                                            /* CheckBBLivenessOnly */ true)) {
9623       // If we haven't looked at all call like instructions, assume that there
9624       // are unknown callees.
9625       setHasUnknownCallee(true, Change);
9626     }
9627 
9628     return Change;
9629   }
9630 };
9631 
9632 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9633 private:
9634   struct QuerySet {
9635     void markReachable(const Function &Fn) {
9636       Reachable.insert(&Fn);
9637       Unreachable.erase(&Fn);
9638     }
9639 
9640     /// If there is no information about the function, None is returned.
9641     Optional<bool> isCachedReachable(const Function &Fn) {
9642       // Assume that we can reach the function.
9643       // TODO: Be more specific with the unknown callee.
9644       if (CanReachUnknownCallee)
9645         return true;
9646 
9647       if (Reachable.count(&Fn))
9648         return true;
9649 
9650       if (Unreachable.count(&Fn))
9651         return false;
9652 
9653       return llvm::None;
9654     }
9655 
9656     /// Set of functions that we know for sure are reachable.
9657     DenseSet<const Function *> Reachable;
9658 
9659     /// Set of functions that are unreachable, but might become reachable.
9660     DenseSet<const Function *> Unreachable;
9661 
9662     /// If we can reach a function with a call to an unknown function, we
9663     /// assume that we can reach any function.
9664     bool CanReachUnknownCallee = false;
9665   };
9666 
9667   struct QueryResolver : public QuerySet {
9668     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9669                         ArrayRef<const AACallEdges *> AAEdgesList) {
9670       ChangeStatus Change = ChangeStatus::UNCHANGED;
9671 
9672       for (auto *AAEdges : AAEdgesList) {
9673         if (AAEdges->hasUnknownCallee()) {
9674           if (!CanReachUnknownCallee)
9675             Change = ChangeStatus::CHANGED;
9676           CanReachUnknownCallee = true;
9677           return Change;
9678         }
9679       }
9680 
9681       for (const Function *Fn : make_early_inc_range(Unreachable)) {
9682         if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
9683           Change = ChangeStatus::CHANGED;
9684           markReachable(*Fn);
9685         }
9686       }
9687       return Change;
9688     }
9689 
9690     bool isReachable(Attributor &A, AAFunctionReachability &AA,
9691                      ArrayRef<const AACallEdges *> AAEdgesList,
9692                      const Function &Fn) {
9693       Optional<bool> Cached = isCachedReachable(Fn);
9694       if (Cached.hasValue())
9695         return Cached.getValue();
9696 
9697       // The query was not cached, thus it is new. We need to request an update
9698       // explicitly to make sure the information is properly run to a
9699       // fixpoint.
9700       A.registerForUpdate(AA);
9701 
9702       // We need to assume that this function can't reach Fn to prevent
9703       // an infinite loop if this function is recursive.
9704       Unreachable.insert(&Fn);
9705 
9706       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9707       if (Result)
9708         markReachable(Fn);
9709       return Result;
9710     }
9711 
9712     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9713                           ArrayRef<const AACallEdges *> AAEdgesList,
9714                           const Function &Fn) const {
9715 
9716       // Handle the most trivial case first.
9717       for (auto *AAEdges : AAEdgesList) {
9718         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9719 
9720         if (Edges.count(const_cast<Function *>(&Fn)))
9721           return true;
9722       }
9723 
9724       SmallVector<const AAFunctionReachability *, 8> Deps;
9725       for (auto &AAEdges : AAEdgesList) {
9726         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9727 
9728         for (Function *Edge : Edges) {
9729           // We don't need a dependency if the result is reachable.
9730           const AAFunctionReachability &EdgeReachability =
9731               A.getAAFor<AAFunctionReachability>(
9732                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9733           Deps.push_back(&EdgeReachability);
9734 
9735           if (EdgeReachability.canReach(A, Fn))
9736             return true;
9737         }
9738       }
9739 
9740       // The result is false for now, set dependencies and leave.
9741       for (auto *Dep : Deps)
9742         A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);
9743 
9744       return false;
9745     }
9746   };
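
  // A minimal sketch of the tri-state caching scheme QuerySet implements
  // (illustrative only; std::optional stands in for llvm::Optional and an
  // integer id for the Function key):
  //
  //   #include <optional>
  //   #include <set>
  //   struct ReachCache {
  //     std::set<int> Reachable, Unreachable;
  //     bool AssumeAll = false; // CanReachUnknownCallee analogue
  //     std::optional<bool> cached(int Fn) const {
  //       if (AssumeAll || Reachable.count(Fn))
  //         return true;
  //       if (Unreachable.count(Fn))
  //         return false;
  //       return std::nullopt; // unknown: compute, then cache the result
  //     }
  //   };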
9747 
9748   /// Get call edges that can be reached by this instruction.
9749   bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
9750                              const Instruction &Inst,
9751                              SmallVector<const AACallEdges *> &Result) const {
9752     // Determine call like instructions that we can reach from the inst.
9753     auto CheckCallBase = [&](Instruction &CBInst) {
9754       if (!Reachability.isAssumedReachable(A, Inst, CBInst))
9755         return true;
9756 
9757       auto &CB = cast<CallBase>(CBInst);
9758       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9759           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9760 
9761       Result.push_back(&AAEdges);
9762       return true;
9763     };
9764 
9765     bool UsedAssumedInformation = false;
9766     return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
9767                                              UsedAssumedInformation,
9768                                              /* CheckBBLivenessOnly */ true);
9769   }
9770 
9771 public:
9772   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9773       : AAFunctionReachability(IRP, A) {}
9774 
9775   bool canReach(Attributor &A, const Function &Fn) const override {
9776     if (!isValidState())
9777       return true;
9778 
9779     const AACallEdges &AAEdges =
9780         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9781 
9782     // Attributor returns attributes as const, so this function has to be
9783     // const for users of this attribute to use it without having to do
9784     // a const_cast.
9785     // This is a hack for us to be able to cache queries.
9786     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9787     bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
9788                                                           {&AAEdges}, Fn);
9789 
9790     return Result;
9791   }
9792 
9793   /// Can \p CB reach \p Fn?
9794   bool canReach(Attributor &A, CallBase &CB,
9795                 const Function &Fn) const override {
9796     if (!isValidState())
9797       return true;
9798 
9799     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9800         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9801 
9802     // Attributor returns attributes as const, so this function has to be
9803     // const for users of this attribute to use it without having to do
9804     // a const_cast.
9805     // This is a hack for us to be able to cache queries.
9806     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9807     QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];
9808 
9809     bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);
9810 
9811     return Result;
9812   }
9813 
9814   bool instructionCanReach(Attributor &A, const Instruction &Inst,
9815                            const Function &Fn,
9816                            bool UseBackwards) const override {
9817     if (!isValidState())
9818       return true;
9819 
9820     if (UseBackwards)
9821       return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);
9822 
9823     const auto &Reachability = A.getAAFor<AAReachability>(
9824         *this, IRPosition::function(*getAssociatedFunction()),
9825         DepClassTy::REQUIRED);
9826 
9827     SmallVector<const AACallEdges *> CallEdges;
9828     bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
9829     // Attributor returns attributes as const, so this function has to be
9830     // const for users of this attribute to use it without having to do
9831     // a const_cast.
9832     // This is a hack for us to be able to cache queries.
9833     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9834     QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
9835     if (!AllKnown)
9836       InstQSet.CanReachUnknownCallee = true;
9837 
9838     return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
9839   }
9840 
9841   /// See AbstractAttribute::updateImpl(...).
9842   ChangeStatus updateImpl(Attributor &A) override {
9843     const AACallEdges &AAEdges =
9844         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9845     ChangeStatus Change = ChangeStatus::UNCHANGED;
9846 
9847     Change |= WholeFunction.update(A, *this, {&AAEdges});
9848 
9849     for (auto &CBPair : CBQueries) {
9850       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9851           *this, IRPosition::callsite_function(*CBPair.first),
9852           DepClassTy::REQUIRED);
9853 
9854       Change |= CBPair.second.update(A, *this, {&AAEdges});
9855     }
9856 
9857     // Update the Instruction queries.
9858     const AAReachability *Reachability = nullptr;
9859     if (!InstQueries.empty()) {
9860       Reachability = &A.getAAFor<AAReachability>(
9861           *this, IRPosition::function(*getAssociatedFunction()),
9862           DepClassTy::REQUIRED);
9863     }
9864 
9865     // Check for local callbases first.
9866     for (auto &InstPair : InstQueries) {
9867       SmallVector<const AACallEdges *> CallEdges;
9868       bool AllKnown =
9869           getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
9870       // Update will return CHANGED if this affects any queries.
9871       if (!AllKnown)
9872         InstPair.second.CanReachUnknownCallee = true;
9873       Change |= InstPair.second.update(A, *this, CallEdges);
9874     }
9875 
9876     return Change;
9877   }
9878 
9879   const std::string getAsStr() const override {
9880     size_t QueryCount =
9881         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
9882 
9883     return "FunctionReachability [" +
9884            std::to_string(WholeFunction.Reachable.size()) + "," +
9885            std::to_string(QueryCount) + "]";
9886   }
9887 
9888   void trackStatistics() const override {}
9889 
9890 private:
9891   bool canReachUnknownCallee() const override {
9892     return WholeFunction.CanReachUnknownCallee;
9893   }
9894 
9895   /// Used to answer if the whole function can reach a specific function.
9896   QueryResolver WholeFunction;
9897 
9898   /// Used to answer if a call base inside this function can reach a specific
9899   /// function.
9900   DenseMap<const CallBase *, QueryResolver> CBQueries;
9901 
9902   /// This is for instruction queries that scan "forward".
9903   DenseMap<const Instruction *, QueryResolver> InstQueries;
9904 };
9905 
9906 /// ---------------------- Assumption Propagation ------------------------------
9907 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
9908   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
9909                        const DenseSet<StringRef> &Known)
9910       : AAAssumptionInfo(IRP, A, Known) {}
9911 
9912   bool hasAssumption(const StringRef Assumption) const override {
9913     return isValidState() && setContains(Assumption);
9914   }
9915 
9916   /// See AbstractAttribute::getAsStr()
9917   const std::string getAsStr() const override {
9918     const SetContents &Known = getKnown();
9919     const SetContents &Assumed = getAssumed();
9920 
9921     const std::string KnownStr =
9922         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
9923     const std::string AssumedStr =
9924         (Assumed.isUniversal())
9925             ? "Universal"
9926             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
9927 
9928     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
9929   }
9930 };
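
// For context (hedged, not specific to this file): assumptions are free-form
// strings attached to functions and call sites via the "llvm.assume" string
// attribute, e.g.
//
//   define void @foo() "llvm.assume"="omp_no_openmp" { ... }
//
// getAsStr() above would then print such strings in the Known/Assumed sets.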
9931 
9932 /// Propagates assumption information from parent functions to all of their
9933 /// successors. An assumption can be propagated if the containing function
9934 /// dominates the called function.
9935 ///
9936 /// We start with a "known" set of assumptions already valid for the associated
9937 /// function and an "assumed" set that initially contains all possible
9938 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
9939 /// contents as concrete values are known. The concrete values are seeded by the
9940 /// first nodes that are either entries into the call graph, or contain no
9941 /// assumptions. Each node is updated as the intersection of the assumed state
9942 /// with all of its predecessors.
9943 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
9944   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
9945       : AAAssumptionInfoImpl(IRP, A,
9946                              getAssumptions(*IRP.getAssociatedFunction())) {}
9947 
9948   /// See AbstractAttribute::manifest(...).
9949   ChangeStatus manifest(Attributor &A) override {
9950     const auto &Assumptions = getKnown();
9951 
9952     // Don't manifest a universal set if it somehow made it here.
9953     if (Assumptions.isUniversal())
9954       return ChangeStatus::UNCHANGED;
9955 
9956     Function *AssociatedFunction = getAssociatedFunction();
9957 
9958     bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
9959 
9960     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
9961   }
9962 
9963   /// See AbstractAttribute::updateImpl(...).
9964   ChangeStatus updateImpl(Attributor &A) override {
9965     bool Changed = false;
9966 
9967     auto CallSitePred = [&](AbstractCallSite ACS) {
9968       const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
9969           *this, IRPosition::callsite_function(*ACS.getInstruction()),
9970           DepClassTy::REQUIRED);
9971       // Get the set of assumptions shared by all of this function's callers.
9972       Changed |= getIntersection(AssumptionAA.getAssumed());
9973       return !getAssumed().empty() || !getKnown().empty();
9974     };
9975 
9976     bool UsedAssumedInformation = false;
9977     // Get the intersection of all assumptions held by this node's predecessors.
9978     // If we don't know all the call sites then this is either an entry into the
9979     // call graph or an empty node. This node is known to only contain its own
9980     // assumptions and can be propagated to its successors.
9981     if (!A.checkForAllCallSites(CallSitePred, *this, true,
9982                                 UsedAssumedInformation))
9983       return indicatePessimisticFixpoint();
9984 
9985     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
9986   }
9987 
9988   void trackStatistics() const override {}
9989 };
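
// A minimal sketch (under the stated simplifications) of the per-update meet
// operation: each node keeps only the assumptions common to all of its known
// callers. Names and containers are illustrative, not the Attributor API:
//
//   #include <set>
//   #include <string>
//   using AssumeSet = std::set<std::string>;
//   // Returns true if Dst shrank, mirroring the Changed flag fed by
//   // getIntersection() in the callback above.
//   static bool intersectInto(AssumeSet &Dst, const AssumeSet &Src) {
//     bool Changed = false;
//     for (auto It = Dst.begin(); It != Dst.end();)
//       if (!Src.count(*It)) {
//         It = Dst.erase(It);
//         Changed = true;
//       } else {
//         ++It;
//       }
//     return Changed;
//   }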
9990 
9991 /// Assumption Info defined for call sites.
9992 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
9993 
9994   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
9995       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
9996 
9997   /// See AbstractAttribute::initialize(...).
9998   void initialize(Attributor &A) override {
9999     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10000     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10001   }
10002 
10003   /// See AbstractAttribute::manifest(...).
10004   ChangeStatus manifest(Attributor &A) override {
10005     // Don't manifest a universal set if it somehow made it here.
10006     if (getKnown().isUniversal())
10007       return ChangeStatus::UNCHANGED;
10008 
10009     CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
10010     bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
10011 
10012     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10013   }
10014 
10015   /// See AbstractAttribute::updateImpl(...).
10016   ChangeStatus updateImpl(Attributor &A) override {
10017     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10018     auto &AssumptionAA =
10019         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10020     bool Changed = getIntersection(AssumptionAA.getAssumed());
10021     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10022   }
10023 
10024   /// See AbstractAttribute::trackStatistics()
10025   void trackStatistics() const override {}
10026 
10027 private:
10028   /// Helper to initialize the known set with all the assumptions this call and
10029   /// the callee contain.
10030   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
10031     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
10032     auto Assumptions = getAssumptions(CB);
10033     if (Function *F = IRP.getAssociatedFunction())
10034       set_union(Assumptions, getAssumptions(*F));
10037     return Assumptions;
10038   }
10039 };
10040 
10041 AACallGraphNode *AACallEdgeIterator::operator*() const {
10042   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
10043       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
10044 }
10045 
10046 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
10047 
10048 const char AAReturnedValues::ID = 0;
10049 const char AANoUnwind::ID = 0;
10050 const char AANoSync::ID = 0;
10051 const char AANoFree::ID = 0;
10052 const char AANonNull::ID = 0;
10053 const char AANoRecurse::ID = 0;
10054 const char AAWillReturn::ID = 0;
10055 const char AAUndefinedBehavior::ID = 0;
10056 const char AANoAlias::ID = 0;
10057 const char AAReachability::ID = 0;
10058 const char AANoReturn::ID = 0;
10059 const char AAIsDead::ID = 0;
10060 const char AADereferenceable::ID = 0;
10061 const char AAAlign::ID = 0;
10062 const char AANoCapture::ID = 0;
10063 const char AAValueSimplify::ID = 0;
10064 const char AAHeapToStack::ID = 0;
10065 const char AAPrivatizablePtr::ID = 0;
10066 const char AAMemoryBehavior::ID = 0;
10067 const char AAMemoryLocation::ID = 0;
10068 const char AAValueConstantRange::ID = 0;
10069 const char AAPotentialValues::ID = 0;
10070 const char AANoUndef::ID = 0;
10071 const char AACallEdges::ID = 0;
10072 const char AAFunctionReachability::ID = 0;
10073 const char AAPointerInfo::ID = 0;
10074 const char AAAssumptionInfo::ID = 0;
10075 
10076 // Macro magic to create the static generator function for attributes that
10077 // follow the naming scheme.
10078 
10079 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
10080   case IRPosition::PK:                                                         \
10081     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
10082 
10083 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
10084   case IRPosition::PK:                                                         \
10085     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
10086     ++NumAAs;                                                                  \
10087     break;
10088 
10089 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
10090   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10091     CLASS *AA = nullptr;                                                       \
10092     switch (IRP.getPositionKind()) {                                           \
10093       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10094       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10095       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10096       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10097       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10098       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10099       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10100       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10101     }                                                                          \
10102     return *AA;                                                                \
10103   }
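
// For illustration, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// expands to (roughly):
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     // ... the five invalid kinds each hit llvm_unreachable() ...
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }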
10104 
10105 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
10106   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10107     CLASS *AA = nullptr;                                                       \
10108     switch (IRP.getPositionKind()) {                                           \
10109       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10110       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
10111       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10112       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10113       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10114       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10115       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10116       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10117     }                                                                          \
10118     return *AA;                                                                \
10119   }
10120 
10121 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
10122   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10123     CLASS *AA = nullptr;                                                       \
10124     switch (IRP.getPositionKind()) {                                           \
10125       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10126       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10127       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10128       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10129       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10130       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10131       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10132       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10133     }                                                                          \
10134     return *AA;                                                                \
10135   }
10136 
10137 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
10138   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10139     CLASS *AA = nullptr;                                                       \
10140     switch (IRP.getPositionKind()) {                                           \
10141       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10142       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10143       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10144       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10145       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10146       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10147       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10148       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10149     }                                                                          \
10150     return *AA;                                                                \
10151   }
10152 
10153 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
10154   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10155     CLASS *AA = nullptr;                                                       \
10156     switch (IRP.getPositionKind()) {                                           \
10157       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10158       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10159       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10160       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10161       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10162       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10163       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10164       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10165     }                                                                          \
10166     return *AA;                                                                \
10167   }
10168 
10169 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
10170 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
10171 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
10172 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
10173 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
10174 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
10175 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
10176 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
10177 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
10178 
10179 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
10180 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
10181 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
10182 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
10183 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
10184 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
10185 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
10186 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
10187 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
10188 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
10189 
10190 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
10191 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
10192 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
10193 
10194 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
10195 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
10196 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
10197 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
10198 
10199 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
10200 
10201 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
10202 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
10203 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
10204 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
10205 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
10206 #undef SWITCH_PK_CREATE
10207 #undef SWITCH_PK_INV
10208